diff --git a/CP5/active_plugins/calculatemoments.py b/CP5/active_plugins/calculatemoments.py
new file mode 100644
index 00000000..3e5ae753
--- /dev/null
+++ b/CP5/active_plugins/calculatemoments.py
@@ -0,0 +1,442 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import numpy as np
+import scipy.ndimage as scind
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+import cellprofiler_core.module as cpm
+import cellprofiler_core.measurement as cpmeas
+import cellprofiler_core.object as cpo
+import cellprofiler_core.setting as cps
+from cellprofiler_core.constants.measurement import COLTYPE_FLOAT
+from cellprofiler_core.setting.do_something import DoSomething
+from cellprofiler_core.setting.multichoice import MultiChoice
+from cellprofiler_core.setting.subscriber import ImageSubscriber, LabelSubscriber
+from cellprofiler_core.utilities.core.object import size_similarly
+
+__doc__ = """\
+CalculateMoments
+================
+
+**CalculateMoments** extracts moments statistics from a given distribution of pixel values.
+
+This module extracts a collection of quantitative measures of the shape of a distribution of pixel values.
+The user can use all pixels to compute the moments or can restrict the computation to pixels within objects.
+If the image has a mask, only unmasked pixels will be used.
+
+Available measurements:
+- Mean
+- Standard deviation (computed using the unbiased estimator)
+- Skewness (the third moment about the mean, scaled by the standard deviation to the third power)
+- Kurtosis (the fourth moment about the mean, scaled by the standard deviation to the fourth power)
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES NO YES
+============ ============ ===============
+
+"""
+
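+# A minimal sketch of the statistics computed below, applied to a flat array
+# of pixel intensities (illustrative only; mirrors the functions that follow):
+#
+#   >>> import numpy as np
+#   >>> px = np.array([0.1, 0.2, 0.4, 0.8])
+#   >>> np.mean(px)                                        # Mean
+#   >>> np.std(px, ddof=1)                                 # Standard Deviation
+#   >>> np.mean((px - px.mean()) ** 3) / np.std(px) ** 3   # Skewness
+#   >>> np.mean((px - px.mean()) ** 4) / np.std(px) ** 4   # Kurtosis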
+
+def get_object_moment(pixels, labels, func):
+    """Apply func to the intensity pixels belonging to each labeled object.
+
+    Returns a column vector indexed by label number; index 0 (the background)
+    is left at zero.
+    """
+    labs = np.unique(labels)
+    moms = np.zeros([np.max(labs) + 1, 1])
+    for l in labs:
+        if l != 0:
+            px = pixels[labels == l]
+            moms[l] = func(px)
+    return moms
+
+
+def mean(pixels):
+ return np.mean(pixels)
+
+
+def std(pixels):
+ return np.std(pixels, ddof=1)
+
+
+def skewness(pixels):
+ if len(pixels) == 0:
+ return 0
+
+ pixels = np.array(pixels, dtype="float64")
+ mean = np.mean(pixels)
+
+ num = np.sum(np.power(pixels - mean, 3))
+ num = num / pixels.size
+ denom = np.std(pixels)
+
+ if denom == 0.0:
+ skew = 0.0
+ else:
+ skew = num / (denom * denom * denom)
+ return skew
+
+
+def kurtosis(pixels):
+ if len(pixels) == 0:
+ return 0
+
+ pixels = np.array(pixels, dtype="float64")
+ mean = np.mean(pixels)
+
+ num = np.sum(np.power(pixels - mean, 4))
+ num = num / pixels.size
+ denom = np.std(pixels)
+
+ if denom == 0.0:
+ kurt = 0.0
+ else:
+ kurt = num / (denom * denom * denom * denom)
+ return kurt
+
+
+"""The category of the measurements made by this module"""
+MOMENTS = "Moments"
+
+MOM_1 = "Mean"
+MOM_2 = "Standard Deviation"
+MOM_3 = "Skewness"
+MOM_4 = "Kurtosis"
+MOM_ALL = [MOM_1, MOM_2, MOM_3, MOM_4]
+
+MOM_TO_F = {MOM_1: mean, MOM_2: std, MOM_3: skewness, MOM_4: kurtosis}
+
+
+class CalculateMoments(cpm.Module):
+
+ module_name = "CalculateMoments"
+ category = "Measurement"
+ variable_revision_number = 1
+
+ def create_settings(self):
+ """Create the settings for the module at startup."""
+ self.image_groups = []
+ self.image_count = cps.HiddenCount(self.image_groups)
+ self.add_image_cb(can_remove=False)
+ self.add_images = DoSomething("", "Add another image", self.add_image_cb)
+ self.image_divider = cps.Divider()
+
+ self.object_groups = []
+ self.object_count = cps.HiddenCount(self.object_groups)
+ self.add_object_cb(can_remove=True)
+ self.add_objects = DoSomething("", "Add another object", self.add_object_cb)
+ self.object_divider = cps.Divider()
+
+ self.moms = MultiChoice(
+ "Moments to compute",
+ MOM_ALL,
+ MOM_ALL,
+ doc="""Moments are statistics describing the distribution of values in the set of pixels of interest:
+
+ - %(MOM_1)s - the first image moment, which corresponds to the central value of the collection of pixels of interest.
+ - %(MOM_2)s - the second image moment, which measures the amount of variation or dispersion of pixel values about its mean.
+ - %(MOM_3)s - a scaled version of the third moment, which measures the asymmetry of the pixel values distribution about its mean.
+ - %(MOM_4)s - a scaled version of the fourth moment, which measures the "peakedness" of the pixel values distribution.
+
+ Choose one or more moments to measure."""
+ % globals(),
+ )
+
+ def settings(self):
+ """The settings as they appear in the save file."""
+ result = [self.image_count, self.object_count]
+ for groups, elements in [
+ (self.image_groups, ["image_name"]),
+ (self.object_groups, ["object_name"]),
+ ]:
+ for group in groups:
+ for element in elements:
+ result += [getattr(group, element)]
+ result += [self.moms]
+ return result
+
+ def prepare_settings(self, setting_values):
+ """Adjust the number of groups based on the number of
+ setting_values"""
+ for count, sequence, fn in (
+ (int(setting_values[0]), self.image_groups, self.add_image_cb),
+ (int(setting_values[1]), self.object_groups, self.add_object_cb),
+ ):
+ del sequence[count:]
+ while len(sequence) < count:
+ fn()
+
+ def visible_settings(self):
+ """The settings as they appear in the module viewer"""
+ result = []
+ for groups, add_button, div in [
+ (self.image_groups, self.add_images, self.image_divider),
+ (self.object_groups, self.add_objects, self.object_divider),
+ ]:
+ for group in groups:
+ result += group.visible_settings()
+ result += [add_button, div]
+
+ result += [self.moms]
+ return result
+
+ def add_image_cb(self, can_remove=True):
+ """Add an image to the image_groups collection
+
+ can_delete - set this to False to keep from showing the "remove"
+ button for images that must be present.
+ """
+ group = cps.SettingsGroup()
+ if can_remove:
+ group.append("divider", cps.Divider(line=False))
+ group.append(
+ "image_name",
+ ImageSubscriber(
+ "Select an image to measure",
+ "None",
+ doc="""
+ What did you call the grayscale images whose moments you want to calculate?""",
+ ),
+ )
+ if can_remove:
+ group.append(
+ "remover",
+ cps.do_something.RemoveSettingButton(
+ "", "Remove this image", self.image_groups, group
+ ),
+ )
+ self.image_groups.append(group)
+
+ def add_object_cb(self, can_remove=True):
+ """Add an object to the object_groups collection
+
+ can_delete - set this to False to keep from showing the "remove"
+ button for objects that must be present.
+ """
+ group = cps.SettingsGroup()
+ if can_remove:
+ group.append("divider", cps.Divider(line=False))
+ group.append(
+ "object_name",
+ LabelSubscriber(
+ "Select objects to measure",
+ "None",
+ doc="""
+ What did you call the objects from which you want to calculate moments?
+ If you only want to calculate moments of
+ the image overall, you can remove all objects using the "Remove this object" button.
+ Objects specified here will have moments computed against *all* images specified above, which
+ may lead to image-object combinations that are unnecessary. If you
+ do not want this behavior, use multiple CalculateMoments
+ modules to specify the particular image-object measures that you want.""",
+ ),
+ )
+ if can_remove:
+ group.append(
+ "remover",
+ cps.do_something.RemoveSettingButton(
+ "", "Remove this object", self.object_groups, group
+ ),
+ )
+ self.object_groups.append(group)
+
+ def validate_module(self, pipeline):
+ """Make sure chosen images are selected only once"""
+ images = set()
+ for group in self.image_groups:
+ if group.image_name.value in images:
+ raise cps.ValidationError(
+ "%s has already been selected" % group.image_name.value,
+ group.image_name,
+ )
+ images.add(group.image_name.value)
+
+ objects = set()
+ for group in self.object_groups:
+ if group.object_name.value in objects:
+ raise cps.ValidationError(
+ "%s has already been selected" % group.object_name.value,
+ group.object_name,
+ )
+ objects.add(group.object_name.value)
+
+ def run(self, workspace):
+ """Run, computing the measurements"""
+ statistics = [["Image", "Object", "Measurement", "Value"]]
+
+ for image_group in self.image_groups:
+ image_name = image_group.image_name.value
+ statistics += self.run_image(image_name, workspace)
+ for object_group in self.object_groups:
+ object_name = object_group.object_name.value
+ statistics += self.run_object(image_name, object_name, workspace)
+
+        if self.show_window:
+            workspace.display_data.statistics = statistics
+
+ def run_image(self, image_name, workspace):
+ """Run measurements on image"""
+ statistics = []
+ input_image = workspace.image_set.get_image(image_name, must_be_grayscale=True)
+ pixels = input_image.pixel_data
+ for name in self.moms.value.split(","):
+ fn = MOM_TO_F[name]
+ value = fn(pixels)
+ statistics += self.record_image_measurement(
+ workspace, image_name, name, value
+ )
+ return statistics
+
+ def run_object(self, image_name, object_name, workspace):
+ statistics = []
+ input_image = workspace.image_set.get_image(image_name, must_be_grayscale=True)
+ objects = workspace.get_objects(object_name)
+ pixels = input_image.pixel_data
+ if input_image.has_mask:
+ mask = input_image.mask
+ else:
+ mask = None
+ labels = objects.segmented
+ try:
+ pixels = objects.crop_image_similarly(pixels)
+ except ValueError:
+ #
+ # Recover by cropping the image to the labels
+ #
+ pixels, m1 = size_similarly(labels, pixels)
+ if np.any(~m1):
+ if mask is None:
+ mask = m1
+ else:
+ mask, m2 = size_similarly(labels, mask)
+ mask[~m2] = False
+
+ if mask is not None:
+ labels = labels.copy()
+ labels[~mask] = 0
+
+ for name in self.moms.value.split(","):
+ fn = MOM_TO_F[name]
+            value = get_object_moment(pixels, labels, fn)
+ statistics += self.record_measurement(
+ workspace, image_name, object_name, name, value
+ )
+ return statistics
+
+    def display(self, workspace, figure):
+        statistics = workspace.display_data.statistics
+        figure.set_subplots((1, 1))
+        figure.subplot_table(0, 0, statistics)
+
+ def get_features(self):
+ """Return a measurement feature name"""
+ return MOM_ALL
+
+ def get_measurement_columns(self, pipeline):
+ """Get column names output for each measurement."""
+ cols = []
+ for im in self.image_groups:
+ for feature in self.get_features():
+ cols += [
+ (
+ "Image",
+ "%s_%s_%s" % (MOMENTS, feature, im.image_name.value),
+ COLTYPE_FLOAT,
+ )
+ ]
+
+ for ob in self.object_groups:
+ for im in self.image_groups:
+ for feature in self.get_features():
+ cols += [
+ (
+ ob.object_name.value,
+ "%s_%s_%s" % (MOMENTS, feature, im.image_name.value),
+ COLTYPE_FLOAT,
+ )
+ ]
+
+ return cols
+
+ def get_categories(self, pipeline, object_name):
+ """Get the measurement categories.
+
+ pipeline - pipeline being run
+ image_name - name of images in question
+ returns a list of category names
+ """
+ if any([object_name == og.object_name for og in self.object_groups]):
+ return [MOMENTS]
+ elif object_name == "Image":
+ return [MOMENTS]
+ else:
+ return []
+
+ def get_measurements(self, pipeline, object_name, category):
+ """Get the measurements made on the given image in the given category
+
+ pipeline - pipeline being run
+ image_name - name of image being measured
+ category - measurement category
+ """
+ if category in self.get_categories(pipeline, object_name):
+ return self.get_features()
+ return []
+
+ def get_measurement_images(self, pipeline, object_name, category, measurement):
+ """Get the list of images measured
+
+ pipeline - pipeline being run
+ image_name - name of objects being measured
+ category - measurement category
+ measurement - measurement made on images
+ """
+ if measurement in self.get_measurements(pipeline, object_name, category):
+ return [x.image_name.value for x in self.image_groups]
+ return []
+
+ def record_measurement(
+ self, workspace, image_name, object_name, feature_name, result
+ ):
+ """Record the result of a measurement in the workspace's
+ measurements"""
+        # Note: this line previously referred to a "fix" helper (likely
+        # CellProfiler's fixup_scipy_ndimage_result). np.fix would truncate the
+        # float measurements toward zero, so coerce to a flat float array instead.
+        data = np.asarray(result, dtype=float).ravel()
+        data[~np.isfinite(data)] = 0
+ workspace.add_measurement(
+ object_name, "%s_%s_%s" % (MOMENTS, feature_name, image_name), data
+ )
+ statistics = [
+ [image_name, object_name, feature_name, "%f" % (d) if len(data) else "-"]
+ for d in data
+ ]
+ return statistics
+
+ def record_image_measurement(self, workspace, image_name, feature_name, result):
+ """Record the result of a measurement in the workspace's
+ measurements"""
+ if not np.isfinite(result):
+ result = 0
+ workspace.measurements.add_image_measurement(
+ "%s_%s_%s" % (MOMENTS, feature_name, image_name), result
+ )
+ statistics = [[image_name, "-", feature_name, "%f" % (result)]]
+ return statistics
diff --git a/CP5/active_plugins/callbarcodes.py b/CP5/active_plugins/callbarcodes.py
new file mode 100644
index 00000000..427eea40
--- /dev/null
+++ b/CP5/active_plugins/callbarcodes.py
@@ -0,0 +1,660 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import csv
+import numpy
+import os
+import re
+import urllib.request, urllib.error, urllib.parse
+
+from io import StringIO
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+from cellprofiler.modules import _help
+import cellprofiler_core.image
+import cellprofiler_core.module
+import cellprofiler_core.measurement
+import cellprofiler_core.object
+import cellprofiler_core.setting
+import cellprofiler_core.constants.setting
+import cellprofiler_core.setting.text
+import cellprofiler_core.setting.choice
+import cellprofiler_core.setting.subscriber
+import cellprofiler_core.utilities.image
+import cellprofiler_core.preferences
+import cellprofiler_core.constants.measurement
+
+__doc__ = """\
+CallBarcodes
+============
+**CallBarcodes** is used for assigning a barcode to an object based on the channel with the strongest intensity for a given number of cycles.
+It is used for optical sequencing by synthesis (SBS).
+
+What do I need as input?
+^^^^^^^^^^^^^^^^^^^^^^^^
+You need to input a .csv file that contains at least two columns.
+One column contains the known barcodes that you will be matching against.
+One column contains the corresponding gene/transcript names.
+All other columns in the .csv will be ignored.
+
+Before running this module in your pipeline, you need to identify the objects in which you will be calling your barcodes, and you will need to have measured the intensities of each object in four channels corresponding to nucleotides A, C, G, and T.
+If the background intensities of your four channels are not very well matched, you might want to run the **CompensateColors** module before measuring the object intensities.
+
+What do I get as output?
+^^^^^^^^^^^^^^^^^^^^^^^^
+To be added
+
+Measurements made by this module
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Within the InputObject.csv, this module outputs the following measurements:
+- BarcodeCalled is the n-cycle string of the barcode sequence that was read by the module
+- MatchedTo_Barcode is the known barcode that the module best matched to the called barcode
+- MatchedTo_ID is an ID number assigned to each known barcode
+- MatchedTo_GeneCode is the known gene/transcript name that corresponds to the known barcode
+- MatchedTo_Score is the quality of the called barcode to known barcode match, reported as (matching nucleotides)/(total nucleotides) where 1 is a perfect match
+
+Note that CellProfiler cannot create a per-parent mean measurement of a string.
+
+References
+^^^^^^^^^^
+Optical Pooled Screens in Human Cells.
+Feldman D, Singh A, Schmid-Burgk JL, Carlson RJ, Mezger A, Garrity AJ, Zhang F, Blainey PC.
+Cell. 2019 Oct 17;179(3):787-799.e17. doi: 10.1016/j.cell.2019.09.016.
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES YES YES
+============ ============ ===============
+
+"""
+
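+# A minimal sketch of the calling scheme implemented in callonebarcode below
+# (illustrative only; the base letters actually come from the measurement
+# names): for each cycle, the base called is the channel with the highest
+# intensity, and the per-cycle quality score is max / sum over the channels.
+#
+#   >>> import numpy
+#   >>> intensities = numpy.array([0.7, 0.1, 0.15, 0.05])  # one cycle: A, C, G, T
+#   >>> base = "ACGT"[numpy.argmax(intensities)]            # -> "A"
+#   >>> quality = numpy.max(intensities) / numpy.sum(intensities)  # -> 0.7
+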
+C_CALL_BARCODES = "Barcode"
+
+
+class CallBarcodes(cellprofiler_core.module.Module):
+
+ module_name = "CallBarcodes"
+ category = "Data Tools"
+ variable_revision_number = 1
+
+    def create_settings(self):
+        self.header_cache = {}
+        self.csv_directory = cellprofiler_core.setting.text.Directory(
+ "Input data file location",
+ allow_metadata=False,
+ doc="""\
+Select the folder containing the CSV file to be loaded. {IO_FOLDER_CHOICE_HELP_TEXT}
+""".format(
+ **{"IO_FOLDER_CHOICE_HELP_TEXT": _help.IO_FOLDER_CHOICE_HELP_TEXT}
+ ),
+ )
+
+ def get_directory_fn():
+ """Get the directory for the CSV file name"""
+ return self.csv_directory.get_absolute_path()
+
+ def set_directory_fn(path):
+ dir_choice, custom_path = self.csv_directory.get_parts_from_path(path)
+ self.csv_directory.join_parts(dir_choice, custom_path)
+
+ self.csv_file_name = cellprofiler_core.setting.text.Filename(
+ "Name of the file",
+ "None",
+ doc="""Provide the file name of the CSV file containing the data you want to load.""",
+ get_directory_fn=get_directory_fn,
+ set_directory_fn=set_directory_fn,
+ browse_msg="Choose CSV file",
+ exts=[("Data file (*.csv)", "*.csv"), ("All files (*.*)", "*.*")],
+ )
+
+ self.input_object_name = cellprofiler_core.setting.subscriber.LabelSubscriber(
+ text="Input object name",
+ doc="These are the objects that the module operates on.",
+ )
+
+ self.ncycles = cellprofiler_core.setting.text.Integer(
+ doc="""\
+Enter the number of cycles present in the data.
+""",
+ text="Number of cycles",
+ value=8,
+ )
+ self.cycle1measure = cellprofiler_core.setting.Measurement(
+ "Select one of the measures from Cycle 1 to use for calling",
+ self.input_object_name.get_value,
+ "AreaShape_Area",
+ doc="""\
+This measurement should be one of the per-cycle intensity measurements made on
+the input objects; it is used as a template to find the matching measurements
+for all cycles and bases.""",
+ )
+
+ self.metadata_field_barcode = cellprofiler_core.setting.choice.Choice(
+ "Select the column of barcodes to match against",
+ ["No CSV file"],
+ choices_fn=self.get_choices,
+            doc="""\
+Select the column of barcodes to match against.
+""",
+ )
+
+ self.metadata_field_tag = cellprofiler_core.setting.choice.Choice(
+ "Select the column with gene/transcript barcode names",
+ ["No CSV file"],
+ choices_fn=self.get_choices,
+            doc="""\
+Select the column with gene/transcript barcode names.
+""",
+ )
+
+ self.wants_call_image = cellprofiler_core.setting.Binary(
+ "Retain an image of the barcodes color coded by call?",
+ False,
+ doc="""\
+Select "*{YES}*" to retain the image of the objects color-coded
+according to which line of the CSV their barcode call matches to,
+for use later in the pipeline (for example, to be saved by a **SaveImages**
+module).""".format(
+ **{"YES": "Yes"}
+ ),
+ )
+
+ self.outimage_calls_name = cellprofiler_core.setting.text.ImageName(
+ "Enter the called barcode image name",
+ "None",
+ doc="""\
+*(Used only if the called barcode image is to be retained for later use in the pipeline)*
+
+Enter the name to be given to the called barcode image.""",
+ )
+
+ self.wants_score_image = cellprofiler_core.setting.Binary(
+ "Retain an image of the barcodes color coded by score match?",
+ False,
+ doc="""\
+Select "*{YES}*" to retain the image of the objects where the intensity of the spot matches
+indicates the match score between the called barcode and its closest match,
+for use later in the pipeline (for example, to be saved by a **SaveImages**
+module).""".format(
+ **{"YES": "Yes"}
+ ),
+ )
+
+ self.outimage_score_name = cellprofiler_core.setting.text.ImageName(
+ "Enter the barcode score image name",
+ "None",
+ doc="""\
+*(Used only if the barcode score image is to be retained for later use in the pipeline)*
+
+Enter the name to be given to the barcode score image.""",
+ )
+
+ self.has_empty_vector_barcode = cellprofiler_core.setting.Binary(
+ "Do you have an empty vector barcode you would like to add to the barcode list?",
+ False,
+ doc="""\
+Select "*{YES}*" to manually enter a sequence that should be added to the uploaded barcode
+list with the gene name of "EmptyVector". This can be helpful when there is a consistent
+backbone sequence to look out for in every barcoding set.""".format(
+ **{"YES": "Yes"}
+ ),
+ )
+
+ self.empty_vector_barcode_sequence = cellprofiler_core.setting.text.Text(
+ "What is the empty vector sequence?",
+ "AAAAAAAAAAAAAAA",
+ doc="""\
+Enter the sequence that represents barcoding reads of an empty vector""",
+ )
+
+ def settings(self):
+ return [
+ self.ncycles,
+ self.input_object_name,
+ self.cycle1measure,
+ self.csv_directory,
+ self.csv_file_name,
+ self.metadata_field_barcode,
+ self.metadata_field_tag,
+ self.wants_call_image,
+ self.outimage_calls_name,
+ self.wants_score_image,
+ self.outimage_score_name,
+ self.has_empty_vector_barcode,
+ self.empty_vector_barcode_sequence,
+ ]
+
+ def visible_settings(self):
+ result = [
+ self.ncycles,
+ self.input_object_name,
+ self.cycle1measure,
+ self.csv_directory,
+ self.csv_file_name,
+ self.metadata_field_barcode,
+ self.metadata_field_tag,
+ self.wants_call_image,
+ self.wants_score_image,
+ ]
+
+ if self.wants_call_image:
+ result += [self.outimage_calls_name]
+
+ if self.wants_score_image:
+ result += [self.outimage_score_name]
+
+ if self.has_empty_vector_barcode:
+ result += [self.empty_vector_barcode_sequence]
+
+ return result
+
+ def validate_module(self, pipeline):
+ csv_path = self.csv_path
+
+ if not os.path.isfile(csv_path):
+ raise cellprofiler_core.setting.ValidationError(
+ "No such CSV file: %s" % csv_path, self.csv_file_name
+ )
+
+ try:
+ self.open_csv()
+ except IOError as e:
+ import errno
+
+ if e.errno == errno.EWOULDBLOCK:
+ raise cellprofiler_core.setting.ValidationError(
+ "Another program (Excel?) is locking the CSV file %s."
+ % self.csv_path,
+ self.csv_file_name,
+ )
+ else:
+ raise cellprofiler_core.setting.ValidationError(
+ "Could not open CSV file %s (error: %s)" % (self.csv_path, e),
+ self.csv_file_name,
+ )
+
+ try:
+ self.get_header()
+ except Exception as e:
+ raise cellprofiler_core.setting.ValidationError(
+ "The CSV file, %s, is not in the proper format."
+ " See this module's help for details on CSV format. (error: %s)"
+ % (self.csv_path, e),
+ self.csv_file_name,
+ )
+
+ @property
+ def csv_path(self):
+ """The path and file name of the CSV file to be loaded"""
+ path = self.csv_directory.get_absolute_path()
+ return os.path.join(path, self.csv_file_name.value)
+
+ def open_csv(self, do_not_cache=False):
+ """Open the csv file or URL, returning a file descriptor"""
+
+ if cellprofiler_core.preferences.is_url_path(self.csv_path):
+ if self.csv_path not in self.header_cache:
+ self.header_cache[self.csv_path] = {}
+
+ entry = self.header_cache[self.csv_path]
+
+ if "URLEXCEPTION" in entry:
+ raise entry["URLEXCEPTION"]
+
+ if "URLDATA" in entry:
+ fd = StringIO(entry["URLDATA"])
+ else:
+ if do_not_cache:
+ raise RuntimeError("Need to fetch URL manually.")
+
+ try:
+ url = cellprofiler_core.utilities.image.generate_presigned_url(
+ self.csv_path
+ )
+ url_fd = urllib.request.urlopen(url)
+ except Exception as e:
+ entry["URLEXCEPTION"] = e
+
+ raise e
+
+ fd = StringIO()
+
+ while True:
+ text = url_fd.read()
+
+ if len(text) == 0:
+ break
+
+                    fd.write(text.decode("utf-8"))
+
+ fd.seek(0)
+
+ entry["URLDATA"] = fd.getvalue()
+
+ return fd
+ else:
+ return open(self.csv_path, "r")
+
+ def get_header(self, do_not_cache=False):
+ """Read the header fields from the csv file
+
+ Open the csv file indicated by the settings and read the fields
+ of its first line. These should be the measurement columns.
+ """
+ with open(self.csv_path, "r") as fp:
+ reader = csv.DictReader(fp)
+
+ return reader.fieldnames
+
+ def get_choices(self, pipeline):
+ choices = self.get_header()
+
+ if not choices:
+ choices = ["No CSV file"]
+
+ return choices
+
+ def run(self, workspace):
+
+ measurements = workspace.measurements
+ listofmeasurements = measurements.get_feature_names(
+ self.input_object_name.value
+ )
+
+ measurements_for_calls = self.getallbarcodemeasurements(
+ listofmeasurements, self.ncycles.value, self.cycle1measure.value
+ )
+
+ objectcount = len(
+ measurements.get_current_measurement(
+ self.input_object_name.value, listofmeasurements[0]
+ )
+ )
+
+ calledbarcodes, quality_scores = self.callonebarcode(
+ measurements_for_calls,
+ measurements,
+ self.input_object_name.value,
+ self.ncycles.value,
+ objectcount,
+ )
+
+ workspace.measurements.add_measurement(
+ self.input_object_name.value,
+ "_".join([C_CALL_BARCODES, "BarcodeCalled"]),
+ calledbarcodes,
+ )
+
+ workspace.measurements.add_measurement(
+ self.input_object_name.value,
+ "_".join([C_CALL_BARCODES, "MeanQualityScore"]),
+ quality_scores,
+ )
+
+ barcodes = self.barcodeset(
+ self.metadata_field_barcode.value, self.metadata_field_tag.value
+ )
+
+ cropped_barcode_dict = {
+ y[: self.ncycles.value]: y for y in list(barcodes.keys())
+ }
+
+ scorelist = []
+ matchedbarcode = []
+ matchedbarcodecode = []
+ matchedbarcodeid = []
+ if self.wants_call_image or self.wants_score_image:
+ objects = workspace.object_set.get_objects(self.input_object_name.value)
+ labels = objects.segmented
+ pixel_data_call = objects.segmented
+ pixel_data_score = objects.segmented
+ count = 1
+ for eachbarcode in calledbarcodes:
+ eachscore, eachmatch = self.queryall(cropped_barcode_dict, eachbarcode)
+ scorelist.append(eachscore)
+ matchedbarcode.append(eachmatch)
+ m_id, m_code = barcodes[eachmatch]
+ matchedbarcodeid.append(m_id)
+ matchedbarcodecode.append(m_code)
+ if self.wants_call_image:
+ pixel_data_call = numpy.where(
+ labels == count, barcodes[eachmatch][0], pixel_data_call
+ )
+ if self.wants_score_image:
+ pixel_data_score = numpy.where(
+ labels == count, 65535 * eachscore, pixel_data_score
+ )
+ count += 1
+
+ imagemeanscore = numpy.mean(scorelist)
+
+ workspace.measurements.add_measurement(
+ "Image", "_".join([C_CALL_BARCODES, "MeanBarcodeScore"]), imagemeanscore
+ )
+
+ imagemeanquality = numpy.mean(quality_scores)
+
+ workspace.measurements.add_measurement(
+ "Image", "_".join([C_CALL_BARCODES, "MeanQualityScore"]), imagemeanquality
+ )
+
+ workspace.measurements.add_measurement(
+ self.input_object_name.value,
+ "_".join([C_CALL_BARCODES, "MatchedTo_Barcode"]),
+ matchedbarcode,
+ )
+ workspace.measurements.add_measurement(
+ self.input_object_name.value,
+ "_".join([C_CALL_BARCODES, "MatchedTo_ID"]),
+ matchedbarcodeid,
+ )
+ workspace.measurements.add_measurement(
+ self.input_object_name.value,
+ "_".join([C_CALL_BARCODES, "MatchedTo_GeneCode"]),
+ matchedbarcodecode,
+ )
+ workspace.measurements.add_measurement(
+ self.input_object_name.value,
+ "_".join([C_CALL_BARCODES, "MatchedTo_Score"]),
+ scorelist,
+ )
+ if self.wants_call_image:
+ workspace.image_set.add(
+ self.outimage_calls_name.value,
+ cellprofiler_core.image.Image(
+ pixel_data_call.astype("uint16"), convert=False
+ ),
+ )
+ if self.wants_score_image:
+ workspace.image_set.add(
+ self.outimage_score_name.value,
+ cellprofiler_core.image.Image(
+ pixel_data_score.astype("uint16"), convert=False
+ ),
+ )
+
+ if self.show_window:
+ workspace.display_data.col_labels = (
+ "Image Mean Score",
+ "Image Mean Quality Score",
+ )
+ workspace.display_data.statistics = [imagemeanscore, imagemeanquality]
+
+ def display(self, workspace, figure):
+ statistics = workspace.display_data.statistics
+
+ figure.set_subplots((1, 1))
+
+ figure.subplot_table(0, 0, statistics)
+
+ def getallbarcodemeasurements(self, measurements, ncycles, examplemeas):
+ stem = re.split("Cycle", examplemeas)[0]
+ measurementdict = {}
+ for eachmeas in measurements:
+ if stem in eachmeas:
+ to_parse = re.split("Cycle", eachmeas)[1]
+ find_cycle = re.search("[0-9]{1,2}", to_parse)
+ parsed_cycle = int(find_cycle.group(0))
+ find_base = re.search("[A-Z]", to_parse)
+ parsed_base = find_base.group(0)
+ if parsed_cycle <= ncycles:
+ if parsed_cycle not in list(measurementdict.keys()):
+ measurementdict[parsed_cycle] = {eachmeas: parsed_base}
+ else:
+ measurementdict[parsed_cycle].update({eachmeas: parsed_base})
+ return measurementdict
+
+ def callonebarcode(
+ self, measurementdict, measurements, object_name, ncycles, objectcount
+ ):
+
+ master_cycles = []
+ score_array = numpy.zeros([ncycles, objectcount])
+
+ for eachcycle in range(1, ncycles + 1):
+ cycles_measures_perobj = []
+ cyclecode = []
+ cycledict = measurementdict[eachcycle]
+ cyclemeasures = list(cycledict.keys())
+ for eachmeasure in cyclemeasures:
+ cycles_measures_perobj.append(
+ measurements.get_current_measurement(object_name, eachmeasure)
+ )
+ cyclecode.append(measurementdict[eachcycle][eachmeasure])
+ cycle_measures_perobj = numpy.transpose(numpy.array(cycles_measures_perobj))
+ argmax_per_obj = numpy.argmax(cycle_measures_perobj, 1)
+ max_per_obj = numpy.max(cycle_measures_perobj, 1)
+ sum_per_obj = numpy.sum(cycle_measures_perobj, 1)
+ score_per_obj = max_per_obj / sum_per_obj
+ argmax_per_obj = list(argmax_per_obj)
+ argmax_per_obj = [cyclecode[x] for x in argmax_per_obj]
+
+ master_cycles.append(list(argmax_per_obj))
+ score_array[eachcycle - 1] = score_per_obj
+
+ mean_per_object = score_array.mean(axis=0)
+
+ return list(map("".join, zip(*master_cycles))), mean_per_object
+
+ def barcodeset(self, barcodecol, genecol):
+ fd = self.open_csv()
+ reader = csv.DictReader(fd)
+ barcodeset = {}
+ count = 1
+ for row in reader:
+ if len(row[barcodecol]) != 0:
+ barcodeset[row[barcodecol]] = (count, row[genecol])
+ count += 1
+ fd.close()
+ if self.has_empty_vector_barcode:
+ barcodeset[self.empty_vector_barcode_sequence.value] = (
+ count,
+ "EmptyVector",
+ )
+ return barcodeset
+
+ def queryall(self, cropped_barcode_dict, query):
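+        """Return (score, match) for the known barcode closest to query.
+
+        The score is the fraction of positions at which the two sequences
+        agree, so 1.0 is a perfect match; e.g. "ACGT" scored against a known
+        "ACGA" gives 3/4 = 0.75. If several known barcodes tie, one of them
+        is returned arbitrarily.
+        """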
+
+ cropped_barcode_list = list(cropped_barcode_dict.keys())
+
+ if query in cropped_barcode_list:
+ # is a perfect match
+ return 1, cropped_barcode_dict[query]
+
+ else:
+ scoredict = {
+ sum([1 for x in range(len(query)) if query[x] == y[x]])
+ / float(len(query)): y
+ for y in cropped_barcode_list
+ }
+ scores = list(scoredict.keys())
+ scores.sort(reverse=True)
+ return scores[0], cropped_barcode_dict[scoredict[scores[0]]]
+
+ def get_measurement_columns(self, pipeline):
+
+ input_object_name = self.input_object_name.value
+
+ result = [
+ (
+ "Image",
+ "_".join([C_CALL_BARCODES, "MeanBarcodeScore"]),
+ cellprofiler_core.constants.measurement.COLTYPE_FLOAT,
+ ),
+ (
+ "Image",
+ "_".join([C_CALL_BARCODES, "MeanQualityScore"]),
+ cellprofiler_core.constants.measurement.COLTYPE_FLOAT,
+ ),
+ ]
+
+ result += [
+ (
+ input_object_name,
+ "_".join([C_CALL_BARCODES, "BarcodeCalled"]),
+ cellprofiler_core.constants.measurement.COLTYPE_VARCHAR,
+ ),
+ (
+ input_object_name,
+ "_".join([C_CALL_BARCODES, "MatchedTo_Barcode"]),
+ cellprofiler_core.constants.measurement.COLTYPE_VARCHAR,
+ ),
+ (
+ input_object_name,
+ "_".join([C_CALL_BARCODES, "MatchedTo_ID"]),
+ cellprofiler_core.constants.measurement.COLTYPE_INTEGER,
+ ),
+ (
+ input_object_name,
+ "_".join([C_CALL_BARCODES, "MatchedTo_GeneCode"]),
+ cellprofiler_core.constants.measurement.COLTYPE_VARCHAR,
+ ),
+ (
+ input_object_name,
+ "_".join([C_CALL_BARCODES, "MatchedTo_Score"]),
+ cellprofiler_core.constants.measurement.COLTYPE_FLOAT,
+ ),
+ (
+ input_object_name,
+ "_".join([C_CALL_BARCODES, "MeanQualityScore"]),
+ cellprofiler_core.constants.measurement.COLTYPE_FLOAT,
+ ),
+ ]
+
+ return result
+
+ def get_categories(self, pipeline, object_name):
+ if object_name == self.input_object_name or object_name == "Image":
+ return [C_CALL_BARCODES]
+
+ return []
+
+ def get_measurements(self, pipeline, object_name, category):
+ if object_name == self.input_object_name and category == C_CALL_BARCODES:
+ return [
+ "BarcodeCalled",
+ "MatchedTo_Barcode",
+ "MatchedTo_ID",
+ "MatchedTo_GeneCode",
+ "MatchedTo_Score",
+ "MeanQualityScore",
+ ]
+
+        elif object_name == "Image":
+ return [
+ "MeanBarcodeScore",
+ "MeanQualityScore",
+ ]
+
+ return []
diff --git a/CP5/active_plugins/compensatecolors.py b/CP5/active_plugins/compensatecolors.py
new file mode 100644
index 00000000..8cc3dd11
--- /dev/null
+++ b/CP5/active_plugins/compensatecolors.py
@@ -0,0 +1,567 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import numpy
+
+import scipy.ndimage
+
+import skimage.exposure
+import skimage.filters
+import skimage.morphology
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+import cellprofiler_core.image
+import cellprofiler_core.module
+import cellprofiler_core.setting
+
+__doc__ = """\
+CompensateColors
+================
+
+**CompensateColors** is a module to deconvolve spectral overlap between at least two sets of images.
+It can optionally be done within an object set.
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES          NO           YES
+============ ============ ===============
+
+
+What do I need as input?
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+At least two sets of images from which you want to remove spectral overlap.
+
+What do I get as output?
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+An equal number of images which have been treated with color compensation.
+
+Technical notes
+^^^^^^^^^^^^^^^
+
+Compensation will be performed between compensation classes so you need to assign your images to at least 2 compensation classes.
+Often, each compensation class is a separate channel used during image acquisition.
+
+"""
+
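+# A minimal sketch of the unmixing performed in run() below (illustrative
+# only): each pixel is a vector of class intensities X; a mixing matrix M is
+# estimated from per-class medians, normalized so its columns sum to 1, and
+# its inverse is applied to recover compensated intensities Y = inv(M) . X.
+#
+#   >>> import numpy
+#   >>> M = numpy.array([[0.9, 0.2], [0.1, 0.8]])  # columns already sum to 1
+#   >>> X = numpy.array([0.5, 0.3])                # observed two-class pixel
+#   >>> Y = numpy.linalg.inv(M).dot(X)             # compensated intensities
+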
+COMPENSATE_SUFFIX = "Compensated"
+
+CC_IMAGES = "Across entire image"
+CC_OBJECTS = "Within objects"
+
+
+class CompensateColors(cellprofiler_core.module.ImageProcessing):
+
+ module_name = "CompensateColors"
+
+ variable_revision_number = 1
+
+ def create_settings(self):
+ self.image_groups = []
+ self.add_image(can_delete=False)
+ self.spacer_1 = cellprofiler_core.setting.Divider()
+ self.add_image(can_delete=False)
+ self.spacer_2 = cellprofiler_core.setting.Divider()
+ self.add_image_button = cellprofiler_core.setting.do_something.DoSomething(
+ "", "Add another image", self.add_image
+ )
+ self.images_or_objects = cellprofiler_core.setting.choice.Choice(
+ "Select where to perform color compensation",
+ [CC_IMAGES, CC_OBJECTS],
+ doc="""\
+    You can perform compensation in two ways:
+
+    - *%(CC_OBJECTS)s:* Compensate only those pixels previously
+      identified as within an object. You will be asked to choose which
+      object set to compensate within.
+    - *%(CC_IMAGES)s:* Compensate across all pixels in the
+      images.
+
+    Compensation is applied on a pixel by pixel basis.
+ """
+ % globals(),
+ )
+
+ self.object_groups = []
+ self.add_object(can_delete=False)
+ self.object_count = cellprofiler_core.setting.HiddenCount(self.object_groups)
+ self.image_count = cellprofiler_core.setting.HiddenCount(self.image_groups)
+ self.do_rescale_input = cellprofiler_core.setting.choice.Choice(
+ "Should individual images be rescaled 0-1 before compensating pre-masking or on unmasked images?",
+ ["No", "Yes"],
+ doc="""\
+Choose if the images should be rescaled 0-1 before compensation.
+If performing compensation inside an object, rescaling will happen before masking to
+that object""",
+ )
+
+ self.do_rescale_after_mask = cellprofiler_core.setting.choice.Choice(
+ "Should images be rescaled 0-1 before compensating but after masking to objects?",
+ ["No", "Yes, per image", "Yes, per group"],
+ doc="""\
+Choose if the images should be rescaled 0-1 before compensation; you can choose whether
+to do this for each image individually or across all images in a group.
+If performing compensation inside an object, rescaling will happen after masking to
+that object""",
+ )
+
+ self.do_match_histograms = cellprofiler_core.setting.choice.Choice(
+ "Should histogram matching be performed between the image groups?",
+ [
+ "No",
+ "Yes, pre-masking or on unmasked images",
+ "Yes, post-masking to objects",
+ ],
+ doc="""\
+Choose if the images should undergo histogram equalization per group, and when
+to perform it if masking inside an object.""",
+ )
+
+ self.histogram_match_class = cellprofiler_core.setting.text.Integer(
+ "What compensation class should serve as the template histogram?", 1
+ )
+
+ self.do_rescale_output = cellprofiler_core.setting.choice.Choice(
+ "Should images be rescaled 0-1 after compensating?",
+ ["No", "Yes"],
+ doc="""\
+Choose if the images should be rescaled 0-1 after compensation.
+""",
+ )
+
+ self.do_scalar_multiply = cellprofiler_core.setting.Binary(
+            "Should the images be divided by a scalar based on group percentiles?",
+ False,
+ doc="""\
+Choose if per group, the images should have a certain user-defined percentile compared,
+and then divided by the ratio between the percentile value of a given group and the
+dimmest group. Example: if the 99th percentile for A, C, G, and T is 0.5, 0.25, 0.3,
+and 0.1, respectively, the pixel values for those groups will be divided by 5, 2.5, 3,
+and 1. This will be applied before masking or rescaling or histogram compensation.""",
+ )
+
+ self.scalar_percentile = cellprofiler_core.setting.text.Float(
+            "What percentile should be used for multiplication?",
+ value=99,
+ minval=0.1,
+ maxval=100,
+ doc="Enter a percentile between 0.1 and 100 to use for comparing ratios",
+ )
+
+ self.do_tophat_filter = cellprofiler_core.setting.Binary(
+ "Should the images have a tophat filter applied before correction?",
+ False,
+ doc="""\
+Whether or not to apply a tophat filter to enhance small bright spots. This filter will be
+applied before rescaling or any other enhancements.""",
+ )
+
+ self.tophat_radius = cellprofiler_core.setting.text.Integer(
+ "What size radius should be used for the tophat filter?",
+ value=3,
+ minval=1,
+ maxval=100,
+ doc="Enter a radius; a disk structuring element of this radius will be used for tophat filtering.",
+ )
+
+ self.do_LoG_filter = cellprofiler_core.setting.Binary(
+ "Should the images have a Laplacian of Gaussian filter applied before correction?",
+ False,
+ doc="""\
+Whether or not to apply a Laplacian of Gaussian. This filter will be
+applied before rescaling or any other enhancements (except tophat filtering if used).""",
+ )
+
+ self.LoG_radius = cellprofiler_core.setting.text.Integer(
+ "What size radius should be used for the LoG filter?",
+ value=1,
+ minval=1,
+ maxval=100,
+ doc="Enter a sigma in pixels; this sigma will be used for LoG filtering.",
+ )
+
+ self.do_DoG_filter = cellprofiler_core.setting.Binary(
+ "Should the images have a Difference of Gaussians filter applied before correction?",
+ False,
+ doc="""\
+Whether or not to apply a Difference of Gaussians. This filter will be
+applied before rescaling or any other enhancements (except tophat filtering and/or LoG filtering if used).""",
+ )
+
+ self.DoG_low_radius = cellprofiler_core.setting.text.Integer(
+ "What size sigma should be used for the DoG low sigma?",
+ value=3,
+ minval=1,
+ maxval=100,
+ doc="Enter a sigma in pixels; this sigma will be used for the lower kernel size.",
+ )
+
+        self.DoG_high_radius = cellprofiler_core.setting.text.Integer(
+            "What size sigma should be used for the DoG high sigma?",
+            value=5,
+            minval=2,
+            maxval=101,
+            doc="Enter a sigma in pixels; this sigma will be used for the larger kernel size.",
+        )
+
+ def add_image(self, can_delete=True):
+ """Add an image to the image_groups collection
+
+ can_delete - set this to False to keep from showing the "remove"
+ button for images that must be present.
+ """
+ group = cellprofiler_core.setting.SettingsGroup()
+ if can_delete:
+ group.append("divider", cellprofiler_core.setting.Divider(line=False))
+ group.append(
+ "image_name",
+ cellprofiler_core.setting.subscriber.ImageSubscriber(
+ "Select an image to measure",
+ "None",
+ doc="Select an image to measure the correlation/colocalization in.",
+ ),
+ )
+ group.append(
+ "class_num",
+ cellprofiler_core.setting.text.Integer(
+ "What compensation class does this image belong to?",
+ 1,
+ doc="""\
+What compensation class does this image belong to?
+You need to divide your images into at least two compensation classes.
+Often, each imaging channel is a separate compensation class.""",
+ ),
+ )
+ group.append(
+ "output_name",
+ cellprofiler_core.setting.text.ImageName(
+ "Select an output image name",
+ "None",
+ doc="What would you like the compensated image to be named?",
+ ),
+ )
+ if (
+ len(self.image_groups) == 0
+ ): # Insert space between 1st two images for aesthetics
+ group.append("extra_divider", cellprofiler_core.setting.Divider(line=False))
+
+ if can_delete:
+ group.append(
+ "remover",
+ cellprofiler_core.setting.do_something.RemoveSettingButton(
+ "", "Remove this image", self.image_groups, group
+ ),
+ )
+
+ self.image_groups.append(group)
+
+ def add_object(self, can_delete=True):
+ """Add an object to the object_groups collection"""
+ group = cellprofiler_core.setting.SettingsGroup()
+ if can_delete:
+ group.append("divider", cellprofiler_core.setting.Divider(line=False))
+
+ group.append(
+ "object_name",
+ cellprofiler_core.setting.subscriber.LabelSubscriber(
+ "Select an object to perform compensation within",
+ "None",
+ doc="""\
+Select the objects to perform compensation within.""",
+ ),
+ )
+
+ if can_delete:
+ group.append(
+ "remover",
+ cellprofiler_core.setting.do_something.RemoveSettingButton(
+ "", "Remove this object", self.object_groups, group
+ ),
+ )
+ self.object_groups.append(group)
+
+ def settings(self):
+ """Return the settings to be saved in the pipeline"""
+ result = [self.image_count, self.object_count]
+ for image_group in self.image_groups:
+ result += [
+ image_group.image_name,
+ image_group.class_num,
+ image_group.output_name,
+ ]
+ result += [self.images_or_objects]
+ result += [object_group.object_name for object_group in self.object_groups]
+ result += [
+ self.do_rescale_input,
+ self.do_rescale_after_mask,
+ self.do_match_histograms,
+ self.histogram_match_class,
+ self.do_rescale_output,
+ ]
+ result += [self.do_scalar_multiply, self.scalar_percentile]
+ result += [
+ self.do_tophat_filter,
+ self.tophat_radius,
+ self.do_LoG_filter,
+ self.LoG_radius,
+ self.do_DoG_filter,
+ self.DoG_low_radius,
+ self.DoG_high_radius,
+ ]
+ return result
+
+ def prepare_settings(self, setting_values):
+ """Make sure there are the right number of image and object slots for the incoming settings"""
+ image_count = int(setting_values[0])
+ object_count = int(setting_values[1])
+
+ del self.image_groups[image_count:]
+ while len(self.image_groups) < image_count:
+ self.add_image()
+
+ del self.object_groups[object_count:]
+ while len(self.object_groups) < object_count:
+ self.add_object()
+
+ def visible_settings(self):
+ result = []
+ for image_group in self.image_groups:
+ result += image_group.visible_settings()
+ result += [self.add_image_button, self.spacer_2, self.images_or_objects]
+ if self.images_or_objects == CC_OBJECTS:
+ for object_group in self.object_groups:
+ result += object_group.visible_settings()
+ result += [self.do_scalar_multiply]
+ if self.do_scalar_multiply:
+ result += [self.scalar_percentile]
+ result += [
+ self.do_rescale_input,
+ self.do_rescale_after_mask,
+ self.do_match_histograms,
+ ]
+ if self.do_match_histograms != "No":
+ result += [self.histogram_match_class]
+ result += [self.do_rescale_output]
+ result += [self.do_tophat_filter]
+ if self.do_tophat_filter:
+ result += [self.tophat_radius]
+ result += [self.do_LoG_filter]
+ if self.do_LoG_filter:
+ result += [self.LoG_radius]
+ result += [self.do_DoG_filter]
+ if self.do_DoG_filter:
+ result += [self.DoG_low_radius, self.DoG_high_radius]
+ return result
+
+ def run(self, workspace):
+
+ # so far this seems to work best with first masking to objects, then doing 2x2 (A and C, G and T)
+
+ imdict = {}
+
+ sample_image = workspace.image_set.get_image(
+ self.image_groups[0].image_name.value
+ )
+ sample_pixels = sample_image.pixel_data
+ sample_shape = sample_pixels.shape
+
+ group_scaling = {}
+
+ if self.do_scalar_multiply.value:
+ temp_im_dict = {}
+ for eachgroup in self.image_groups:
+ eachimage = workspace.image_set.get_image(
+ eachgroup.image_name.value
+ ).pixel_data
+ if eachgroup.class_num.value not in temp_im_dict.keys():
+ temp_im_dict[eachgroup.class_num.value] = list(eachimage)
+ else:
+ temp_im_dict[eachgroup.class_num.value] += list(eachimage)
+ for eachclass in temp_im_dict.keys():
+ group_scaling[eachclass] = numpy.percentile(
+ temp_im_dict[eachclass], self.scalar_percentile.value
+ )
+ min_intensity = numpy.min(list(group_scaling.values()))
+ for key, value in iter(group_scaling.items()):
+ group_scaling[key] = value / min_intensity
+
+ else:
+ for eachgroup in self.image_groups:
+ if eachgroup.class_num.value not in group_scaling.keys():
+ group_scaling[eachgroup.class_num.value] = 1.0
+
+ if self.images_or_objects.value == CC_OBJECTS:
+ object_name = self.object_groups[0]
+ objects = workspace.object_set.get_objects(object_name.object_name.value)
+ object_labels = objects.segmented
+ object_mask = numpy.where(object_labels > 0, 1, 0)
+
+ for eachgroup in self.image_groups:
+ eachimage = workspace.image_set.get_image(
+ eachgroup.image_name.value
+ ).pixel_data
+
+ if self.do_tophat_filter.value:
+ selem = skimage.morphology.disk(radius=int(self.tophat_radius.value))
+ eachimage = skimage.morphology.white_tophat(eachimage, selem)
+
+ if self.do_LoG_filter.value:
+ eachimage = self.log_ndi(eachimage, int(self.LoG_radius.value))
+
+ if self.do_DoG_filter.value:
+ eachimage = skimage.filters.difference_of_gaussians(
+ eachimage,
+ int(self.DoG_low_radius.value),
+ int(self.DoG_high_radius.value),
+ )
+
+ eachimage = eachimage / group_scaling[eachgroup.class_num.value]
+ if self.do_rescale_input.value == "Yes":
+ eachimage = skimage.exposure.rescale_intensity(
+ eachimage,
+ in_range=(eachimage.min(), eachimage.max()),
+ out_range=((1.0 / 65535), 1.0),
+ )
+ if self.do_rescale_after_mask.value == "Yes, per image":
+ eachimage = eachimage * object_mask
+ eachimage_no_bg = eachimage[
+ eachimage != 0
+ ] # don't measure the background
+ eachimage = skimage.exposure.rescale_intensity(
+ eachimage,
+ in_range=(eachimage_no_bg.min(), eachimage_no_bg.max()),
+ out_range=((1.0 / 65535), 1.0),
+ )
+ eachimage = numpy.round(eachimage * 65535)
+ if eachgroup.class_num.value not in imdict.keys():
+ imdict[eachgroup.class_num.value] = [
+ [eachgroup.image_name.value],
+ eachimage.reshape(-1),
+ [eachgroup.output_name.value],
+ ]
+ else:
+ imdict[eachgroup.class_num.value][0].append(eachgroup.image_name.value)
+ imdict[eachgroup.class_num.value][1] = numpy.concatenate(
+ (imdict[eachgroup.class_num.value][1], eachimage.reshape(-1))
+ )
+ imdict[eachgroup.class_num.value][2].append(eachgroup.output_name.value)
+
+ keys = list(imdict.keys())
+ keys.sort()
+
+ if self.do_match_histograms.value != "No":
+ histogram_template = imdict[self.histogram_match_class.value][1]
+ if self.do_match_histograms.value == "Yes, post-masking to objects":
+ histogram_mask = numpy.tile(
+ object_mask.reshape(-1),
+ len(imdict[self.histogram_match_class.value][0]),
+ )
+ histogram_template = histogram_mask * histogram_template
+ histogram_template = numpy.where(
+ histogram_template == 0, 1, histogram_template
+ )
+
+ # apply transformations, if any
+ for eachkey in keys:
+ reshaped_pixels = imdict[eachkey][1]
+ if (
+ self.do_match_histograms.value
+ == "Yes, pre-masking or on unmasked images"
+ ):
+ if eachkey != self.histogram_match_class.value:
+ reshaped_pixels = skimage.exposure.match_histograms(
+ reshaped_pixels, histogram_template
+ )
+ if self.images_or_objects.value == CC_OBJECTS:
+ category_count = len(imdict[eachkey][0])
+ category_mask = numpy.tile(object_mask.reshape(-1), category_count)
+ reshaped_pixels = reshaped_pixels * category_mask
+ reshaped_pixels = numpy.where(reshaped_pixels == 0, 1, reshaped_pixels)
+ if self.do_rescale_after_mask.value == "Yes, per group":
+ reshaped_pixels_no_bg = reshaped_pixels[
+ reshaped_pixels > 1
+ ] # don't measure the background
+ reshaped_pixels = skimage.exposure.rescale_intensity(
+ reshaped_pixels,
+ in_range=(reshaped_pixels_no_bg.min(), reshaped_pixels_no_bg.max()),
+ out_range=(1, 65535),
+ )
+ if self.do_match_histograms.value == "Yes, post-masking to objects":
+ if eachkey != self.histogram_match_class.value:
+ reshaped_pixels = skimage.exposure.match_histograms(
+ reshaped_pixels, histogram_template
+ )
+ imdict[eachkey][1] = reshaped_pixels
+
+ imlist = []
+ for eachkey in keys:
+ imlist.append(imdict[eachkey][1])
+ X = numpy.array(imlist)
+ X = X.T
+
+ M = self.get_medians(X).T
+ M = M / M.sum(axis=0)
+ W = numpy.linalg.inv(M)
+ Y = W.dot(X.T).astype(int)
+
+ for eachdim in range(Y.shape[0]):
+ key = keys[eachdim]
+ im_out = Y[eachdim].reshape(
+ len(imdict[key][0]), sample_shape[0], sample_shape[1]
+ )
+ im_out = im_out / 65535.0
+ for each_im in range(len(imdict[key][0])):
+ im_out[each_im] = numpy.where(im_out[each_im] < 0, 0, im_out[each_im])
+ im_out[each_im] = numpy.where(im_out[each_im] > 1, 1, im_out[each_im])
+ if self.do_rescale_output.value == "Yes":
+ im_out[each_im] = skimage.exposure.rescale_intensity(
+ im_out[each_im],
+ in_range=(im_out[each_im].min(), im_out[each_im].max()),
+ out_range=(0.0, 1.0),
+ )
+ output_image = cellprofiler_core.image.Image(
+ im_out[each_im],
+ parent_image=workspace.image_set.get_image(imdict[key][0][each_im]),
+ )
+ workspace.image_set.add(imdict[key][2][each_im], output_image)
+
+    #
+    # "volumetric" indicates whether or not this module supports 3D images.
+    # The compensation performed in run() reshapes images assuming 2D planes,
+    # so explicitly return False here to indicate that 3D images are not
+    # supported.
+    #
+ def volumetric(self):
+ return False
+
+ def get_medians(self, X):
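+        """Estimate the mixing matrix from the data.
+
+        For each compensation class i, takes the median over all pixels whose
+        brightest class is i; row i of the returned matrix is that median
+        intensity vector across the classes.
+        """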
+ arr = []
+ for i in range(X.shape[1]):
+ arr += [numpy.median(X[X.argmax(axis=1) == i], axis=0)]
+ M = numpy.array(arr)
+ return M
+
+    def log_ndi(self, data, sigma):
+        """Apply an inverted Laplacian of Gaussian filter.
+
+        The image is converted to 16-bit, filtered with
+        scipy.ndimage.gaussian_laplace, negated so that bright spots become
+        positive peaks, clipped to [0, 65535], and returned as a float image.
+        """
+ data = skimage.img_as_uint(data)
+ f = scipy.ndimage.gaussian_laplace
+ arr_ = -1 * f(data.astype(float), sigma)
+ arr_ = numpy.clip(arr_, 0, 65535) / 65535
+
+ return skimage.img_as_float(arr_)
diff --git a/CP5/active_plugins/cpij/__init__.py b/CP5/active_plugins/cpij/__init__.py
new file mode 100644
index 00000000..50965596
--- /dev/null
+++ b/CP5/active_plugins/cpij/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+
+__author__ = "Mark Hiner, Alice Lucas, Beth Cimini"
+__all__ = ["bridge", "server"]
diff --git a/CP5/active_plugins/cpij/bridge.py b/CP5/active_plugins/cpij/bridge.py
new file mode 100644
index 00000000..7f7152a7
--- /dev/null
+++ b/CP5/active_plugins/cpij/bridge.py
@@ -0,0 +1,138 @@
+from multiprocessing.managers import SyncManager
+import multiprocessing as mp
+import atexit, cpij.server as ijserver
+from queue import Queue
+from threading import Lock
+
+
+class QueueManager(SyncManager):
+ pass
+
+
+QueueManager.register("input_queue")
+QueueManager.register("output_queue")
+QueueManager.register("get_lock")
+
+_init_method = None
+
+
+def init_method():
+ global _init_method
+ if not _init_method:
+ if ijserver.is_server_running():
+ l = lock()
+ l.acquire()
+ to_imagej().put(
+ {ijserver.PYIMAGEJ_KEY_COMMAND: ijserver.PYIMAGEJ_CMD_GET_INIT_METHOD}
+ )
+ _init_method = from_imagej().get()[ijserver.PYIMAGEJ_KEY_OUTPUT]
+ l.release()
+
+ return _init_method
+
+
+def lock() -> Lock:
+ """
+    Helper method to synchronize requests with the ImageJ server.
+
+ A lock should be acquired before sending data to the server, and released after
+ receiving the result.
+
+ Returns
+ ---------
+ A Lock connected to the ImageJ server.
+ """
+ return _manager().get_lock()
+
+
+def to_imagej() -> Queue:
+ """
+ Helper method to send data to the ImageJ server
+
+ Returns
+ ---------
+ A Queue connected to the ImageJ server. Only its put method should be called.
+ """
+ return _manager().input_queue()
+
+
+def from_imagej() -> Queue:
+ """
+ Helper method to retrieve data from the ImageJ server
+
+ Returns
+ ---------
+ A Queue connected to the ImageJ server. Only its get method should be called.
+ """
+ return _manager().output_queue()
+
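+# A minimal sketch of the request/response protocol these helpers support
+# (this mirrors init_method above):
+#
+#   l = lock()
+#   l.acquire()
+#   to_imagej().put({ijserver.PYIMAGEJ_KEY_COMMAND: ijserver.PYIMAGEJ_CMD_GET_INIT_METHOD})
+#   result = from_imagej().get()[ijserver.PYIMAGEJ_KEY_OUTPUT]
+#   l.release()
+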
+
+def init_pyimagej(init_string):
+ """
+ Start the pyimagej daemon thread if it isn't already running.
+
+ Parameters
+ ----------
+    init_string : str
+        This can be a path to a local ImageJ installation, or an
+        initialization string per imagej.init(), e.g. "sc.fiji:fiji:2.1.0"
+ """
+ to_imagej().put(
+ {
+ ijserver.PYIMAGEJ_KEY_COMMAND: ijserver.PYIMAGEJ_CMD_START,
+ ijserver.PYIMAGEJ_KEY_INPUT: init_string,
+ }
+ )
+ result = from_imagej().get()
+ if result == ijserver.PYIMAGEJ_STATUS_STARTUP_FAILED:
+ _shutdown_imagej()
+ # Wait for the server to shut down
+ while ijserver.is_server_running():
+ pass
+ return False
+
+ global _init_method
+ _init_method = init_string
+ return True
+
+
+def _manager() -> QueueManager:
+ """
+ Helper method to return a QueueManager connected to the ImageJ server
+ """
+ if not ijserver.is_server_running():
+ raise RuntimeError("No ImageJ server instance available")
+
+ manager = QueueManager(
+ address=("127.0.0.1", ijserver.SERVER_PORT), authkey=ijserver._SERVER_KEY
+ )
+ manager.connect()
+ return manager
+
+
+def _shutdown_imagej():
+ """
+ Helper method to send the shutdown signal to ImageJ. Intended to be called
+ at process exit.
+ """
+ if ijserver.is_server_running():
+ to_imagej().put({ijserver.PYIMAGEJ_KEY_COMMAND: ijserver.PYIMAGEJ_CMD_EXIT})
+
+
+def start_imagej_server():
+ """
+ If the ImageJ server is not already running, spawns the server in a new
+ Process. Blocks until the server is up and running.
+ """
+ if ijserver.is_server_running():
+ return
+
+ ctx = mp.get_context("spawn")
+ p = ctx.Process(target=ijserver.main)
+ p.start()
+
+ # wait for the server to start up
+ ijserver.wait_for_server_startup()
+
+ # Ensure server shuts down when main app closes
+ atexit.register(_shutdown_imagej)
diff --git a/CP5/active_plugins/cpij/server.py b/CP5/active_plugins/cpij/server.py
new file mode 100644
index 00000000..a40fc107
--- /dev/null
+++ b/CP5/active_plugins/cpij/server.py
@@ -0,0 +1,506 @@
+from pathlib import Path
+from multiprocessing.managers import SyncManager
+from queue import Queue
+from cellprofiler_core.image import Image
+from cellprofiler_core.setting.text.alphanumeric.name.image_name import ImageName
+from cellprofiler_core.setting.text import (
+ Filename,
+ Directory,
+ Alphanumeric,
+ Integer,
+ Float,
+)
+from cellprofiler_core.setting.subscriber import ImageSubscriber
+from cellprofiler_core.setting import ValidationError
+import jpype, imagej, multiprocessing, socket, threading, time
+import skimage.io
+
+
+"""
+Constants for communicating with pyimagej
+"""
+PYIMAGEJ_KEY_COMMAND = "KEY_COMMAND" # Matching value indicates what command to execute
+PYIMAGEJ_KEY_INPUT = "KEY_INPUT" # Matching value is command-specific input object
+PYIMAGEJ_KEY_OUTPUT = "KEY_OUTPUT" # Matching value is command-specific output object
+PYIMAGEJ_CMD_START = "COMMAND_START" # Start the PyImageJ instance + JVM
+PYIMAGEJ_CMD_GET_INIT_METHOD = (
+ "COMMAND_GET_INIT_METHOD" # Get the initialization string used for PyImageJ
+)
+PYIMAGEJ_CMD_SCRIPT_PARSE = "COMMAND_SCRIPT_PARAMS" # Parse a script file's parameters
+PYIMAGEJ_SCRIPT_PARSE_INPUTS = "SCRIPT_PARSE_INPUTS" # Script input dictionary key
+PYIMAGEJ_SCRIPT_PARSE_OUTPUTS = "SCRIPT_PARSE_OUTPUTS" # Script output dictionary key
+PYIMAGEJ_CMD_SCRIPT_RUN = "COMMAND_SCRIPT_RUN" # Run a script
+PYIMAGEJ_SCRIPT_RUN_FILE_KEY = "SCRIPT_RUN_FILE_KEY" # The script filename key
+PYIMAGEJ_SCRIPT_RUN_INPUT_KEY = (
+ "SCRIPT_RUN_INPUT_KEY" # The script input dictionary key
+)
+PYIMAGEJ_SCRIPT_RUN_CONVERT_IMAGES = (
+ "SCRIPT_RUN_CONVERT_IMAGES" # Whether images should be converted or not
+)
+PYIMAGEJ_CMD_EXIT = "COMMAND_EXIT" # Shut down the pyimagej daemon
+PYIMAGEJ_STATUS_CMD_UNKNOWN = (
+ "STATUS_COMMAND_UNKNOWN" # Returned when an unknown command is passed to pyimagej
+)
+PYIMAGEJ_STATUS_STARTUP_COMPLETE = (
+ "STATUS_STARTUP_COMPLETE" # Returned after initial startup before daemon loop
+)
+PYIMAGEJ_STATUS_STARTUP_FAILED = (
+ "STATUS_STARTUP_FAILED" # Returned when imagej.init fails
+)
+PYIMAGEJ_STATUS_SHUTDOWN_COMPLETE = (
+ "STATUS_SHUTDOWN_COMPLETE" # Returned when imagej + jpype JVM have closed
+)
+INIT_LOCAL = "Local"
+INIT_ENDPOINT = "Endpoint"
+INIT_LATEST = "Latest"
+INPUT_CLASS = "INPUT"
+OUTPUT_CLASS = "OUTPUT"
+
+SERVER_PORT = 45923
+# FIXME this needs to be encrypted somehow
+_SERVER_KEY = b"abracadabra"
+
+_in_queue = Queue()
+_out_queue = Queue()
+_sync_lock = threading.Lock()
+
+
+class QueueManager(SyncManager):
+ pass
+
+
+QueueManager.register("input_queue", callable=lambda: _in_queue)
+QueueManager.register("output_queue", callable=lambda: _out_queue)
+QueueManager.register("get_lock", callable=lambda: _sync_lock)
+
+
+class Character(Alphanumeric):
+ """
+ A Setting for text entries of size one
+ """
+
+ def __init__(self, text, value, *args, **kwargs):
+ super().__init__(text, value, *args, **kwargs)
+
+ def test_valid(self, pipeline):
+ """
+ Restrict value to single character
+ """
+ super().test_valid(pipeline)
+ if len(self.value) > 1:
+ raise ValidationError("Only single characters can be used.", self)
+
+
+class Boolean(Integer):
+ """
+ A helper setting for boolean values, converting 0 to False and any other number to True
+ """
+
+ def __init__(self, text, value, *args, **kwargs):
+ super().__init__(
+ text,
+ value,
+ doc="""\
+Enter '0' for \"False\" and any other value for \"True\"
+""",
+ *args,
+ **kwargs,
+ )
+
+ def get_value(self, reraise=False):
+ v = super().get_value(reraise)
+ if v == 0:
+ return False
+
+ return True
+
+
+def _preprocess_script_inputs(ij, input_map, convert_images):
+ """
+ Helper method to convert pythonic inputs to something that can be handled by ImageJ
+
+ In particular this is necessary for image inputs which won't be auto-converted by Jpype
+
+ Parameters
+ ----------
+ ij : imagej.init(), required
+ ImageJ entry point (from imagej.init())
+ input_map: map, required
+ map of input names to values
+ convert_images: boolean, required
+ boolean indicating if image inputs and outputs should be auto-converted to appropriate numeric types
+ """
+ for key in input_map:
+ if isinstance(input_map[key], Image):
+ cp_image = input_map[key].get_image()
+ # CellProfiler images are typically stored as floats which can cause unexpected results in ImageJ.
+ # By default, we convert to 16-bit int type, unless we're sure it's 8 bit in which case we use that.
+ if convert_images:
+ if input_map[key].scale == 255:
+ cp_image = skimage.img_as_ubyte(cp_image)
+ else:
+ cp_image = skimage.img_as_uint(cp_image)
+ input_map[key] = ij.py.to_dataset(cp_image)
+
+
+def _convert_java_to_python_type(ij, return_value):
+ """
+    Helper method to convert ImageJ/Java values to python values that can be passed between processes via queues (pickled)
+
+ Parameters
+ ----------
+ ij : imagej.init(), required
+ ImageJ entry point
+ return_value : supported Java type, required
+ A value to convert from Java to python
+
+ Returns
+ ---------
+ An instance of a python type that can safely cross queues with the given value, or None if no valid type exists.
+ """
+ if return_value is None:
+ return None
+ return_class = return_value.getClass()
+ type_string = str(return_class.toString()).split()[1]
+
+ image_classes = (
+ jpype.JClass("ij.ImagePlus"),
+ jpype.JClass("net.imagej.Dataset"),
+ jpype.JClass("net.imagej.ImgPlus"),
+ )
+
+ if type_string == "java.lang.String" or type_string == "java.lang.Character":
+ return str(return_value)
+ elif (
+ type_string == "java.lang.Integer"
+ or type_string == "java.lang.Long"
+ or type_string == "java.lang.Short"
+ ):
+ return int(return_value)
+ elif type_string == "java.lang.Float" or type_string == "java.lang.Double":
+ return float(return_value)
+ elif type_string == "java.lang.Boolean":
+ if return_value:
+ return True
+ else:
+ return False
+ elif type_string == "java.lang.Byte":
+ return bytes(return_value)
+    # any() rather than bool() of a generator expression, which is always truthy
+    elif any(issubclass(return_class, img_class) for img_class in image_classes):
+ # TODO actualize changes in a virtual ImagePlus. Remove this when pyimagej does this innately
+ if issubclass(return_class, jpype.JClass("ij.ImagePlus")):
+ ij.py.synchronize_ij1_to_ij2(return_value)
+ py_img = ij.py.from_java(return_value)
+
+ # HACK
+ # Workaround for DataArrays potentially coming back with Java names. Fixed upstream in:
+ # https://github.com/imagej/pyimagej/commit/a1861b6c1658d6751fa314650b13411f956549ab
+ py_img.name = ij.py.from_java(py_img.name)
+ return py_img
+
+ # Not a supported type
+ return None
+
+
+def convert_java_type_to_setting(param_name, param_type, param_class):
+ """
+ Helper method to convert ImageJ/Java class parameter types to CellProfiler settings
+
+ Parameters
+ ----------
+ param_name : str, required
+ The name of the parameter
+ param_type : str, required
+ The Java class name describing the parameter type
+    param_class: str, required
+        One of INPUT_CLASS or OUTPUT_CLASS, based on the parameter use
+
+    Returns
+    ---------
+    One or more Settings of a type appropriate for param_type, named with
+    param_name, or None if no valid conversion exists.
+ """
+ type_string = param_type.split()[1]
+ img_strings = ("ij.ImagePlus", "net.imagej.Dataset", "net.imagej.ImgPlus")
+ if INPUT_CLASS == param_class:
+ param_label = param_name
+ if type_string == "java.lang.String":
+ return Alphanumeric(param_label, "")
+ if type_string == "java.lang.Character":
+ return Character(param_label, "")
+ elif type_string == "java.lang.Integer":
+ return Integer(param_label, 0, minval=-(2**31), maxval=((2**31) - 1))
+ elif type_string == "java.lang.Long":
+ return Integer(param_label, 0, minval=-(2**63), maxval=((2**63) - 1))
+ elif type_string == "java.lang.Short":
+ return Integer(param_label, 0, minval=-32768, maxval=32767)
+ elif type_string == "java.lang.Byte":
+ return Integer(param_label, 0, minval=-128, maxval=127)
+ elif type_string == "java.lang.Boolean":
+ return Boolean(param_label, 0)
+ elif type_string == "java.lang.Float":
+ return Float(param_label, minval=-(2**31), maxval=((2**31) - 1))
+ elif type_string == "java.lang.Double":
+ return Float(param_label, minval=-(2**63), maxval=((2**63) - 1))
+ elif type_string == "java.io.File":
+ param_dir = Directory(f"{param_label} directory", allow_metadata=False)
+
+ def set_directory_fn_app(path):
+ dir_choice, custom_path = param_dir.get_parts_from_path(path)
+ param_dir.join_parts(dir_choice, custom_path)
+
+ param_file = Filename(
+ param_label,
+ param_label,
+ get_directory_fn=param_dir.get_absolute_path,
+ set_directory_fn=set_directory_fn_app,
+ browse_msg=f"Choose {param_label} file",
+ )
+ return (param_dir, param_file)
+        # any() rather than bool() of a generator, which is always truthy
+        elif any(type_string == img_string for img_string in img_strings):
+ return ImageSubscriber(param_label)
+ elif OUTPUT_CLASS == param_class:
+        if any(type_string == img_string for img_string in img_strings):
+ return ImageName(
+ "[OUTPUT, " + type_string + "] " + param_name,
+ param_name,
+ doc="""
+ You may use this setting to rename the indicated output variable, if desired.
+ """,
+ )
+
+ return None
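+
+# Mapping sketch (illustrative): a script input whose Java type string is
+# "class java.lang.Integer" becomes a CellProfiler Integer setting:
+#
+#   setting = convert_java_type_to_setting(
+#       "radius", "class java.lang.Integer", INPUT_CLASS
+#   )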
+
+
+def _start_imagej_process():
+ """Python script to run when starting a new ImageJ process.
+
+ All commands are initiated by adding a dictionary with a {pyimagej_key_command} entry to the {input_queue}. This
+ indicating which supported command should be executed. Some commands may take additional input, which is specified
+ in the dictionary with {pyimagej_key_input}.
+
+ Outputs are returned by adding a dictionary to the {output_queue} with the {pyimagej_key_output} key, or
+ {pyimagej_key_error} if an error occurred during script execution.
+
+ NB: must be run from the main thread in order to eventually shut down the JVM.
+
+ Supported commands
+ ----------
+ {pyimagej_cmd_start} : start the pyimagej instance if it's not already running
+ inputs: initialization string for imagej.init()
+ outputs: either {pyimagej_status_startup_complete} or {pyimagej_status_startup_failed} as appropriate
+ {pyimagej_cmd_script_parse} : parse the parameters from an imagej script.
+ inputs: script filename
+ outputs: dictionary with mappings
+ {pyimagej_script_parse_inputs} -> dictionary of input field name/value pairs
+ {pyimagej_script_parse_outputs} -> dictionary of output field name/value pairs
+ {pyimagej_cmd_script_run} : takes a set of named inputs from CellProfiler and runs the given imagej script
+ inputs: dictionary with mappings
+ {pyimagej_script_run_file_key} -> script filename
+ {pyimagej_script_run_input_key} -> input parameter name/value dictionary
+ outputs: dictionary containing output field name/value pairs
+ {pyimagej_cmd_exit} : shut down the pyimagej daemon.
+ inputs: none
+ outputs: {pyimagej_status_shutdown_complete}
+
+ Return values
+ ----------
+ {pyimagej_status_cmd_unknown} : unrecognized command, no further output is coming
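+
+    Example
+    ----------
+    An illustrative sketch of a script-run exchange (the script path and input
+    name are hypothetical; input_queue and output_queue are the proxies
+    obtained below):
+
+        input_queue.put({
+            PYIMAGEJ_KEY_COMMAND: PYIMAGEJ_CMD_SCRIPT_RUN,
+            PYIMAGEJ_KEY_INPUT: {
+                PYIMAGEJ_SCRIPT_RUN_FILE_KEY: "/path/to/script.groovy",
+                PYIMAGEJ_SCRIPT_RUN_INPUT_KEY: {"threshold": 0.5},
+                PYIMAGEJ_SCRIPT_RUN_CONVERT_IMAGES: True,
+            },
+        })
+        outputs = output_queue.get()[PYIMAGEJ_KEY_OUTPUT]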
+ """
+
+ manager = QueueManager(address=("127.0.0.1", SERVER_PORT), authkey=_SERVER_KEY)
+ manager.connect()
+ input_queue = manager.input_queue()
+ output_queue = manager.output_queue()
+
+ ij = False
+ script_service = None
+ init_string = None
+
+ # Main daemon loop, polling the input queue
+ while True:
+ command_dictionary = input_queue.get()
+ cmd = command_dictionary[PYIMAGEJ_KEY_COMMAND]
+ if cmd == PYIMAGEJ_CMD_START and not ij:
+ init_string = command_dictionary[PYIMAGEJ_KEY_INPUT]
+ try:
+ if init_string:
+ # Attempt to initialize with the given string
+ ij = imagej.init(init_string)
+ else:
+ ij = imagej.init()
+ init_string = INIT_LATEST
+ if not ij:
+ init_string = None
+ output_queue.put(PYIMAGEJ_STATUS_STARTUP_FAILED)
+ else:
+ script_service = ij.script()
+ output_queue.put(PYIMAGEJ_STATUS_STARTUP_COMPLETE)
+            except jpype.JException:
+                # Initialization failed
+                output_queue.put(PYIMAGEJ_STATUS_STARTUP_FAILED)
+                jpype.shutdownJVM()
+ elif cmd == PYIMAGEJ_CMD_GET_INIT_METHOD:
+ output_queue.put({PYIMAGEJ_KEY_OUTPUT: init_string})
+ elif cmd == PYIMAGEJ_CMD_SCRIPT_PARSE:
+ script_path = command_dictionary[PYIMAGEJ_KEY_INPUT]
+ script_file = Path(script_path)
+ script_info = script_service.getScript(script_file)
+ script_inputs = {}
+ script_outputs = {}
+ for script_in in script_info.inputs():
+ script_inputs[str(script_in.getName())] = str(
+ script_in.getType().toString()
+ )
+ for script_out in script_info.outputs():
+ script_outputs[str(script_out.getName())] = str(
+ script_out.getType().toString()
+ )
+ output_queue.put(
+ {
+ PYIMAGEJ_SCRIPT_PARSE_INPUTS: script_inputs,
+ PYIMAGEJ_SCRIPT_PARSE_OUTPUTS: script_outputs,
+ }
+ )
+ elif cmd == PYIMAGEJ_CMD_SCRIPT_RUN:
+ script_path = (command_dictionary[PYIMAGEJ_KEY_INPUT])[
+ PYIMAGEJ_SCRIPT_RUN_FILE_KEY
+ ]
+ script_file = Path(script_path)
+ input_map = (command_dictionary[PYIMAGEJ_KEY_INPUT])[
+ PYIMAGEJ_SCRIPT_RUN_INPUT_KEY
+ ]
+ convert_types = (command_dictionary[PYIMAGEJ_KEY_INPUT])[
+ PYIMAGEJ_SCRIPT_RUN_CONVERT_IMAGES
+ ]
+ _preprocess_script_inputs(ij, input_map, convert_types)
+ script_out_map = (
+ script_service.run(script_file, True, input_map).get().getOutputs()
+ )
+ output_dict = {}
+ for entry in script_out_map.entrySet():
+ key = str(entry.getKey())
+ value = _convert_java_to_python_type(ij, entry.getValue())
+ if value is not None:
+ output_dict[key] = value
+
+ output_queue.put({PYIMAGEJ_KEY_OUTPUT: output_dict})
+ elif cmd == PYIMAGEJ_CMD_EXIT:
+ break
+ else:
+ output_queue.put(PYIMAGEJ_STATUS_CMD_UNKNOWN)
+
+ # Shut down the imagej process
+ if ij:
+ ij.dispose()
+ jpype.shutdownJVM()
+ output_queue.put(PYIMAGEJ_STATUS_SHUTDOWN_COMPLETE)
+
+
+def _start_server():
+ """
+ Start the server that will be used for sending communication between ImageJ
+ and CellProfiler.
+
+ NB: this method will permanently block its thread.
+ """
+ m = QueueManager(address=("", SERVER_PORT), authkey=_SERVER_KEY)
+ s = m.get_server()
+ s.serve_forever()
+
+
+def _start_thread(target=None, args=(), name=None, daemon=True):
+ """
+ Create and start a thread to run a given target
+
+ Parameters
+ ----------
+ target : runnable
+ Same as threading.Thread
+ args : list
+ Same as threading.Thread
+ name : string
+ Same as threading.Thread
+    daemon : bool
+        Whether or not the thread should be a daemon. Default True.
+ """
+ thread = threading.Thread(target=target, args=args, name=name)
+ thread.daemon = daemon
+ thread.start()
+
+
+def is_server_running(timeout=0.25):
+ """
+ Helper method to determine if the ImageJ server is up and running.
+
+ Parameters
+ ----------
+ timeout : number, optional (default 0.25)
+ Duration in seconds to wait when connecting to server
+
+ Return values
+ ----------
+ True if there was a response from the server. False otherwise.
+ """
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+ s.settimeout(timeout)
+ return s.connect_ex(("localhost", SERVER_PORT)) == 0
+
+
+def wait_for_server_startup(timeout=15):
+ """
+ Helper method that blocks until the timeout value is reached, or the ImageJ
+ server becomes available for connection.
+
+ Parameters
+ ----------
+ timeout : number, optional (default 15)
+ Duration in seconds to wait for the server to start
+
+ Errors
+ ----------
+ RuntimeError
+ If timeout is exceeded
+ """
+    # Poll every 0.25 seconds, i.e. up to four attempts per second of timeout
+    max_attempts = timeout * 4
+    current_attempt = 0
+    while (not is_server_running(0.01)) and (current_attempt < max_attempts):
+        time.sleep(0.25)
+        current_attempt += 1
+
+    if current_attempt >= max_attempts:
+        raise RuntimeError("ImageJ server failed to start within allotted time.")
+
+
+def main():
+ """
+ Start the two pyimagej server components:
+ - This will create a new "imagej-server" thread that handles inter-process
+ communication
+ - The main thread will block in a poll listening for that
+ communication, and interacting with the Java ImageJ process.
+
+ Because this runs indefinitely until instructed to shut down,
+ this method should be called in a new subprocess.
+ """
+ multiprocessing.freeze_support()
+
+ _start_thread(target=_start_server, name="imagej-server")
+
+ wait_for_server_startup()
+
+ _start_imagej_process()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/CP5/active_plugins/declumpobjects.py b/CP5/active_plugins/declumpobjects.py
new file mode 100644
index 00000000..cc5219b1
--- /dev/null
+++ b/CP5/active_plugins/declumpobjects.py
@@ -0,0 +1,317 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import numpy
+import skimage.morphology
+import skimage.segmentation
+import scipy.ndimage
+import skimage.filters
+import skimage.feature
+import skimage.util
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+import cellprofiler_core.image
+import cellprofiler_core.module
+import cellprofiler_core.setting
+import cellprofiler_core.setting.text
+import cellprofiler_core.setting.choice
+import cellprofiler_core.object
+from cellprofiler_core.module.image_segmentation import ObjectProcessing
+from cellprofiler_core.setting.subscriber import ImageSubscriber
+
+"""
+DeclumpObjects
+==============
+
+**DeclumpObjects** will split objects based on a seeded watershed method
+
+#. Compute the `local maxima`_ (either through the `Euclidean distance transformation`_
+of the segmented objects or through the intensity values of a reference image
+
+#. Dilate the seeds as specified
+
+#. Use these seeds as markers for watershed
+
+NOTE: This implementation is based off of the **IdentifyPrimaryObjects** declumping implementation.
+For more information, see the aforementioned module.
+
+.. _Euclidean distance transformation: https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.morphology.distance_transform_edt.html
+.. _local maxima: http://scikit-image.org/docs/dev/api/skimage.feature.html#peak-local-max
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES YES NO
+============ ============ ===============
+
+"""
+
+O_SHAPE = "Shape"
+O_INTENSITY = "Intensity"
+
+
+class DeclumpObjects(ObjectProcessing):
+ category = "Advanced"
+
+ module_name = "DeclumpObjects"
+
+ variable_revision_number = 1
+
+ def create_settings(self):
+ super(DeclumpObjects, self).create_settings()
+
+ self.declump_method = cellprofiler_core.setting.choice.Choice(
+ text="Declump method",
+ choices=[O_SHAPE, O_INTENSITY],
+ value=O_SHAPE,
+ doc="""\
+This setting allows you to choose the method that is used to draw the
+line between segmented objects.
+
+- *{O_SHAPE}:* Dividing lines between clumped objects are based on
+ the shape of the clump. For example, when a clump contains two
+ objects, the dividing line will be placed where indentations occur
+ between the two objects. The intensity of the original image is
+ not necessary in this case.
+
+ **Technical description:** The distance transform of the segmentation
+ is used to identify local maxima as seeds (i.e. the centers of the
+ individual objects), and the seeds are then used on the inverse of
+ that distance transform to determine new segmentations via watershed.
+
+- *{O_INTENSITY}:* Dividing lines between clumped objects are determined
+ based on the intensity of the original image. This works best if the
+ dividing line between objects is dimmer than the objects themselves.
+
+ **Technical description:** The distance transform of the segmentation
+ is used to identify local maxima as seeds (i.e. the centers of the
+ individual objects). Those seeds are then used as markers for a
+ watershed on the inverted original intensity image.
+""".format(
+ **{"O_SHAPE": O_SHAPE, "O_INTENSITY": O_INTENSITY}
+ ),
+ )
+
+ self.reference_name = ImageSubscriber(
+ text="Reference Image",
+ doc="Image to reference for the *{O_INTENSITY}* method".format(
+ **{"O_INTENSITY": O_INTENSITY}
+ ),
+ )
+
+ self.gaussian_sigma = cellprofiler_core.setting.text.Float(
+ text="Segmentation distance transform smoothing factor",
+ value=1.0,
+ doc="Sigma defines how 'smooth' the Gaussian kernel makes the image. Higher sigma means a smoother image.",
+ )
+
+ self.min_dist = cellprofiler_core.setting.text.Integer(
+ text="Minimum distance between seeds",
+ value=1,
+ minval=0,
+ doc="""\
+Minimum number of pixels separating peaks in a region of `2 * min_distance + 1`
+(i.e. peaks are separated by at least min_distance).
+To find the maximum number of peaks, set this value to `1`.
+""",
+ )
+
+ self.min_intensity = cellprofiler_core.setting.text.Float(
+ text="Minimum absolute internal distance",
+ value=0.0,
+ minval=0.0,
+ doc="""\
+Minimum absolute intensity threshold for seed generation. Since this threshold is
+applied to the distance transformed image, this defines a minimum object
+"size". Objects smaller than this size will not contain seeds.
+
+By default, the absolute threshold is the minimum value of the image.
+For distance transformed images, this value is `0` (or the background).
+""",
+ )
+
+ self.exclude_border = cellprofiler_core.setting.text.Integer(
+ text="Pixels from border to exclude",
+ value=0,
+ minval=0,
+ doc="Exclude seed generation from within `n` pixels of the image border.",
+ )
+
+ self.max_seeds = cellprofiler_core.setting.text.Integer(
+ text="Maximum number of seeds",
+ value=-1,
+ doc="""\
+Maximum number of seeds to generate. Default is no limit.
+When the number of seeds exceeds this number, seeds are chosen
+based on largest internal distance.
+""",
+ )
+
+ self.structuring_element = cellprofiler_core.setting.StructuringElement(
+ text="Structuring element for seed dilation",
+ doc="""\
+Structuring element to use for dilating the seeds.
+Volumetric images will require volumetric structuring elements.
+""",
+ )
+
+ self.connectivity = cellprofiler_core.setting.text.Integer(
+ text="Watershed connectivity",
+ value=1,
+ minval=1,
+ maxval=3,
+ doc="Connectivity for the watershed algorithm. Default is 1, maximum is number of dimensions of the image",
+ )
+
+ def settings(self):
+ __settings__ = super(DeclumpObjects, self).settings()
+
+ return __settings__ + [
+ self.declump_method,
+ self.reference_name,
+ self.gaussian_sigma,
+ self.min_dist,
+ self.min_intensity,
+ self.exclude_border,
+ self.max_seeds,
+ self.structuring_element,
+ self.connectivity,
+ ]
+
+ def visible_settings(self):
+ __settings__ = super(DeclumpObjects, self).visible_settings()
+
+ __settings__ += [self.declump_method]
+
+ if self.declump_method.value == O_INTENSITY:
+ __settings__ += [self.reference_name]
+
+ __settings__ += [
+ self.gaussian_sigma,
+ self.min_dist,
+ self.min_intensity,
+ self.exclude_border,
+ self.max_seeds,
+ self.structuring_element,
+ self.connectivity,
+ ]
+
+ return __settings__
+
+ def run(self, workspace):
+ x_name = self.x_name.value
+ y_name = self.y_name.value
+ object_set = workspace.object_set
+ images = workspace.image_set
+
+ x = object_set.get_objects(x_name)
+ x_data = x.segmented
+
+ strel_dim = self.structuring_element.value.ndim
+
+ im_dim = x.segmented.ndim
+
+ # Make sure structuring element matches image dimension
+ if strel_dim != im_dim:
+ raise ValueError(
+ "Structuring element does not match object dimensions: "
+ "{} != {}".format(strel_dim, im_dim)
+ )
+
+ # Get the segmentation distance transform
+ peak_image = scipy.ndimage.distance_transform_edt(x_data > 0)
+
+        # Generate a watershed-ready image
+ if self.declump_method.value == O_SHAPE:
+ # Use the reverse of the image to get basins at peaks
+ # dist_transform = skimage.util.invert(dist_transform)
+ watershed_image = -peak_image
+ watershed_image -= watershed_image.min()
+
+ else:
+ reference_name = self.reference_name.value
+ reference = images.get_image(reference_name)
+ reference_data = reference.pixel_data
+
+ # Set the image as a float and rescale to full bit depth
+ watershed_image = skimage.img_as_float(reference_data, force_copy=True)
+ watershed_image -= watershed_image.min()
+ watershed_image = 1 - watershed_image
+
+ # Smooth the image
+ watershed_image = skimage.filters.gaussian(
+ watershed_image, sigma=self.gaussian_sigma.value
+ )
+
+        # Generate local peaks; peak_local_max returns peak coordinates, which
+        # are scattered back into a boolean seed mask (the indices=False form
+        # of this call was removed in newer scikit-image releases)
+        peak_coords = skimage.feature.peak_local_max(
+            peak_image,
+            min_distance=self.min_dist.value,
+            threshold_rel=self.min_intensity.value,
+            exclude_border=self.exclude_border.value,
+            num_peaks=self.max_seeds.value if self.max_seeds.value != -1 else numpy.inf,
+        )
+        seeds = numpy.zeros(peak_image.shape, dtype=bool)
+        seeds[tuple(peak_coords.T)] = True
+
+ # Dilate seeds based on settings
+ seeds = skimage.morphology.binary_dilation(
+ seeds, self.structuring_element.value
+ )
+ seeds_dtype = (
+ numpy.int16 if x.count < numpy.iinfo(numpy.int16).max else numpy.int32
+ )
+
+ # NOTE: Not my work, the comments below are courtesy of Ray
+ #
+ # Create a marker array where the unlabeled image has a label of
+ # -(nobjects+1)
+ # and every local maximum has a unique label which will become
+ # the object's label. The labels are negative because that
+ # makes the watershed algorithm use FIFO for the pixels which
+ # yields fair boundaries when markers compete for pixels.
+ #
+ seeds = scipy.ndimage.label(seeds)[0]
+
+ markers = numpy.zeros_like(seeds, dtype=seeds_dtype)
+ markers[seeds > 0] = -seeds[seeds > 0]
+
+        # Perform the watershed (watershed lives in skimage.segmentation in
+        # newer scikit-image releases; it was formerly in skimage.morphology)
+        watershed_boundaries = skimage.segmentation.watershed(
+            connectivity=self.connectivity.value,
+            image=watershed_image,
+            markers=markers,
+            mask=x_data != 0,
+        )
+
+ y_data = watershed_boundaries.copy()
+ # Copy the location of the "background"
+ zeros = numpy.where(y_data == 0)
+ # Re-shift all of the labels into the positive realm
+ y_data += numpy.abs(numpy.min(y_data)) + 1
+ # Re-apply the background
+ y_data[zeros] = 0
+
+ objects = cellprofiler_core.object.Objects()
+ objects.segmented = y_data.astype(numpy.uint16)
+ objects.parent_image = x.parent_image
+
+ object_set.add_objects(objects, y_name)
+
+ self.add_measurements(workspace)
+
+ if self.show_window:
+ workspace.display_data.x_data = x.segmented
+
+ workspace.display_data.y_data = y_data
+
+ workspace.display_data.dimensions = x.dimensions
diff --git a/CP5/active_plugins/distancetransform.py b/CP5/active_plugins/distancetransform.py
new file mode 100644
index 00000000..46e7f2fd
--- /dev/null
+++ b/CP5/active_plugins/distancetransform.py
@@ -0,0 +1,106 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import logging
+import scipy.ndimage
+import numpy
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+import cellprofiler_core.image
+import cellprofiler_core.module
+import cellprofiler_core.setting
+from cellprofiler_core.setting import Binary
+
+__doc__ = """\
+DistanceTransform
+=================
+
+**DistanceTransform** computes the distance transform of a binary image.
+For each foreground pixel, the distance to the nearest background pixel is
+computed. The resulting image can optionally be rescaled so that the largest
+distance is 1.
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES YES YES
+============ ============ ===============
+
+"""
+
+
+class DistanceTransform(cellprofiler_core.module.ImageProcessing):
+ module_name = "DistanceTransform"
+
+ variable_revision_number = 1
+
+ def create_settings(self):
+ super(DistanceTransform, self).create_settings()
+
+ self.rescale_values = Binary(
+ "Rescale values from 0 to 1?",
+ True,
+ doc="""\
+Select "*Yes*" to rescale the transformed values to lie between 0 and
+1. This is the option to use if the distance transformed image is to be
+used for thresholding by an **Identify** module or the like, which
+assumes a 0-1 scaling.
+
+Select "*No*" to leave the values in absolute pixel units. This useful
+in cases where the actual pixel distances are to be used downstream as
+input for a measurement module.""",
+ )
+
+ def settings(self):
+ __settings__ = super(DistanceTransform, self).settings()
+ __settings__ += [
+ self.rescale_values,
+ ]
+ return __settings__
+
+ def visible_settings(self):
+ """Return the settings as displayed to the user"""
+        __settings__ = super(DistanceTransform, self).visible_settings()
+ __settings__ += [self.rescale_values]
+ return __settings__
+
+ def run(self, workspace):
+ x_name = self.x_name.value
+
+ y_name = self.y_name.value
+
+ images = workspace.image_set
+
+ x = images.get_image(x_name)
+
+ dimensions = x.dimensions
+
+ x_data = x.pixel_data
+
+ y_data = scipy.ndimage.distance_transform_edt(x_data, sampling=x.spacing)
+
+ if self.rescale_values.value:
+ y_data = y_data / numpy.max(y_data)
+
+ y = cellprofiler_core.image.Image(
+ dimensions=dimensions, image=y_data, parent_image=x
+ )
+
+ images.add(y_name, y)
+
+ if self.show_window:
+ workspace.display_data.x_data = x_data
+ workspace.display_data.y_data = y_data
+ workspace.display_data.dimensions = dimensions
+
+ def volumetric(self):
+ return True
diff --git a/CP5/active_plugins/enhancedmeasuretexture.py b/CP5/active_plugins/enhancedmeasuretexture.py
new file mode 100644
index 00000000..bc651938
--- /dev/null
+++ b/CP5/active_plugins/enhancedmeasuretexture.py
@@ -0,0 +1,1249 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import numpy as np
+import scipy.ndimage as scind
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+import calculatemoments as cpmoments
+import cellprofiler_core.module as cpm
+import cellprofiler_core.object as cpo
+import cellprofiler_core.setting as cps
+import cellprofiler_core.measurement as cpmeas
+from cellprofiler_core.constants.measurement import COLTYPE_FLOAT
+from cellprofiler_core.setting.do_something import DoSomething
+from cellprofiler_core.setting.multichoice import MultiChoice
+from cellprofiler_core.setting.subscriber import ImageSubscriber, LabelSubscriber
+from cellprofiler_core.setting.text import Integer
+from cellprofiler_core.utilities.core.object import size_similarly
+from centrosome.cpmorphology import fixup_scipy_ndimage_result as fix
+from centrosome.haralick import Haralick, normalized_per_object
+from centrosome.filter import gabor, stretch
+
+__version__ = "$Revision$"
+
+__doc__ = """\
+EnhancedMeasureTexture
+======================
+
+**EnhancedMeasureTexture** measures the degree and nature of textures within an image or objects.
+
+This module measures the variations in grayscale images. An object (or
+entire image) without much texture has a smooth appearance; an
+object or image with a lot of texture will appear rough and show a wide
+variety of pixel intensities.
+
+This module can also measure textures of objects against grayscale images.
+Any input objects specified will have their texture measured against *all* input
+images specified, which may lead to image-object texture combinations that are unnecessary.
+If you do not want this behavior, use multiple **EnhancedMeasureTexture** modules to
+specify the particular image-object measures that you want.
+
+Available measurements:
+
+**Haralick Features**: Haralick texture features are derived from the
+co-occurrence matrix, which tabulates how often pairs of pixel intensities
+occur at a given spatial offset from one another. **EnhancedMeasureTexture**
+can measure textures at different scales; the scale you choose determines
+how the co-occurrence matrix is constructed.
+For example, if you choose a scale of 2, each pixel in the image (excluding
+some border pixels) will be compared against the one that is two pixels to
+the right. **EnhancedMeasureTexture** quantizes the image into eight intensity
+levels. There are then 8x8 possible ways to categorize a pixel with its
+scale-neighbor. **EnhancedMeasureTexture** forms the 8x8 co-occurrence matrix
+by counting how many pixels and neighbors have each of the 8x8 intensity
+combinations. Thirteen features are then calculated for the image by performing
+mathematical operations on the co-occurrence matrix (the formulas can be found `here`_):
+
+- Angular Second Moment
+- Contrast
+- Correlation
+- Sum of Squares: Variance
+- Inverse Difference Moment
+- Sum Average
+- Sum Variance
+- Sum Entropy
+- Entropy
+- Difference Variance
+- Difference Entropy
+- Information Measure of Correlation 1
+- Information Measure of Correlation 2
+
+**Gabor "wavelet" features**: These features are similar to wavelet features,
+and they are obtained by applying so-called Gabor filters to the image. The Gabor
+filters measure the frequency content in different orientations. They are very
+similar to wavelets, and in the current context they work exactly as wavelets, but
+they are not wavelets by a strict mathematical definition. The Gabor
+features detect correlated bands of intensities, for instance, images of
+Venetian blinds would have high scores in the horizontal orientation.
+
+
+Technical notes
+^^^^^^^^^^^^^^^
+**EnhancedMeasureTexture** performs the following algorithm to compute a score
+at each scale using the Gabor filter:
+
+#. Divide the half-circle from 0 to 180 degrees by the number of desired
+   angles. For instance, if the user chooses two angles, EnhancedMeasureTexture
+   uses 0 and 90 degrees (horizontal and vertical) for the filter
+   orientations. This is the Theta value from the reference paper.
+
+#. For each angle, compute the Gabor filter for each object in the image
+   at two phases separated by 90 degrees in order to account for texture
+   features whose peaks fall on even or odd quarter-wavelengths.
+
+#. Multiply the image by each Gabor filter and sum over the pixels
+   in each object.
+
+#. Take the square root of the sum of the squares of the two filter scores.
+   This results in one score per Theta.
+
+#. Save the maximum score over all Theta as the score at the desired scale.
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES YES YES
+============ ============ ===============
+
+
+References
+^^^^^^^^^^
+Haralick et al. (1973), "Textural Features for Image
+Classification," IEEE Transaction on Systems Man, Cybernetics,
+SMC-3(6):610-621.
+
+Gabor D. (1946). "Theory of communication,"
+Journal of the Institute of Electrical Engineers 93:429-441.
+
+.. _here: http://murphylab.web.cmu.edu/publications/boland/boland_node26.html
+
+"""
+
+"""The category of the per-object measurements made by this module"""
+TEXTURE = "Texture"
+
+"""The "name" slot in the object group dictionary entry"""
+OG_NAME = "name"
+"""The "remove" slot in the object group dictionary entry"""
+OG_REMOVE = "remove"
+
+F_HARALICK = """AngularSecondMoment Contrast Correlation Variance
+InverseDifferenceMoment SumAverage SumVariance SumEntropy Entropy
+DifferenceVariance DifferenceEntropy InfoMeas1 InfoMeas2""".split()
+
+F_GABOR = "Gabor"
+
+H_HORIZONTAL = "Horizontal"
+A_HORIZONTAL = "0"
+H_VERTICAL = "Vertical"
+A_VERTICAL = "90"
+H_DIAGONAL = "Diagonal"
+A_DIAGONAL = "45"
+H_ANTIDIAGONAL = "Anti-diagonal"
+A_ANTIDIAGONAL = "135"
+H_ALL = [H_HORIZONTAL, H_VERTICAL, H_DIAGONAL, H_ANTIDIAGONAL]
+
+H_TO_A = {
+ H_HORIZONTAL: A_HORIZONTAL,
+ H_VERTICAL: A_VERTICAL,
+ H_DIAGONAL: A_DIAGONAL,
+ H_ANTIDIAGONAL: A_ANTIDIAGONAL,
+}
+
+F_TAMURA = "Tamura"
+F_1 = "Coarseness"
+F_2 = "Contrast"
+F_3 = "Directionality"
+F_ALL = [F_1, F_2, F_3]
+
+HIST_COARS_BINS = 3
+NB_SCALES = 5
+DIR_BINS = 125
+
+
+class EnhancedMeasureTexture(cpm.Module):
+
+ module_name = "EnhancedMeasureTexture"
+ variable_revision_number = 3
+ category = "Measurement"
+
+ def create_settings(self):
+ """Create the settings for the module at startup.
+
+ The module allows for an unlimited number of measured objects, each
+ of which has an entry in self.object_groups.
+ """
+ self.image_groups = []
+ self.object_groups = []
+ self.scale_groups = []
+ self.image_count = cps.HiddenCount(self.image_groups)
+ self.object_count = cps.HiddenCount(self.object_groups)
+ self.scale_count = cps.HiddenCount(self.scale_groups)
+ self.add_image_cb(can_remove=False)
+ self.add_images = DoSomething("", "Add another image", self.add_image_cb)
+ self.image_divider = cps.Divider()
+ self.add_object_cb(can_remove=True)
+ self.add_objects = DoSomething("", "Add another object", self.add_object_cb)
+ self.object_divider = cps.Divider()
+ self.add_scale_cb(can_remove=False)
+ self.add_scales = DoSomething("", "Add another scale", self.add_scale_cb)
+ self.scale_divider = cps.Divider()
+
+ self.wants_gabor = cps.Binary(
+ "Measure Gabor features?",
+ True,
+ doc="""The Gabor features measure striped texture in an object. They
+ take a substantial time to calculate. Check this setting to
+ measure the Gabor features. Uncheck this setting to skip
+ the Gabor feature calculation if it is not informative for your
+ images""",
+ )
+ self.gabor_angles = Integer(
+ "Number of angles to compute for Gabor",
+ 4,
+ 2,
+ doc="""
+ (Used only if Gabor features are measured)
+ How many angles do you want to use for each Gabor texture measurement?
+ The default value is 4 which detects bands in the horizontal, vertical and diagonal
+ orientations.""",
+ )
+ self.gabor_divider = cps.Divider()
+
+ self.wants_tamura = cps.Binary(
+ "Measure Tamura features?",
+ True,
+ doc="""The Tamura features are very ugly.""",
+ )
+ self.tamura_feats = MultiChoice(
+ "Features to compute",
+ F_ALL,
+ F_ALL,
+ doc="""Tamura Features:
+
+ %(F_1)s - bla.
+ %(F_2)s - bla.
+ %(F_3)s - bla.
+
+ Choose one or more features to compute."""
+ % globals(),
+ )
+
+ def settings(self):
+ """The settings as they appear in the save file."""
+ result = [self.image_count, self.object_count, self.scale_count]
+ for groups, elements in [
+ (self.image_groups, ["image_name"]),
+ (self.object_groups, ["object_name"]),
+ (self.scale_groups, ["scale", "angles"]),
+ ]:
+ for group in groups:
+ for element in elements:
+ result += [getattr(group, element)]
+ result += [self.wants_gabor, self.gabor_angles]
+ result += [self.wants_tamura, self.tamura_feats]
+ return result
+
+ def prepare_settings(self, setting_values):
+ """Adjust the number of object groups based on the number of
+ setting_values"""
+ for count, sequence, fn in (
+ (int(setting_values[0]), self.image_groups, self.add_image_cb),
+ (int(setting_values[1]), self.object_groups, self.add_object_cb),
+ (int(setting_values[2]), self.scale_groups, self.add_scale_cb),
+ ):
+ del sequence[count:]
+ while len(sequence) < count:
+ fn()
+
+ def visible_settings(self):
+ """The settings as they appear in the module viewer"""
+ result = []
+ for groups, add_button, div in [
+ (self.image_groups, self.add_images, self.image_divider),
+ (self.object_groups, self.add_objects, self.object_divider),
+ (self.scale_groups, self.add_scales, self.scale_divider),
+ ]:
+ for group in groups:
+ result += group.visible_settings()
+ result += [add_button, div]
+
+ result += [self.wants_gabor]
+ if self.wants_gabor:
+ result += [self.gabor_angles]
+ result += [self.gabor_divider]
+
+ result += [self.wants_tamura]
+ if self.wants_tamura:
+ result += [self.tamura_feats]
+ return result
+
+ def add_image_cb(self, can_remove=True):
+ """Add an image to the image_groups collection
+
+        can_remove - set this to False to keep from showing the "remove"
+ button for images that must be present.
+ """
+ group = cps.SettingsGroup()
+ if can_remove:
+ group.append("divider", cps.Divider(line=False))
+ group.append(
+ "image_name",
+ ImageSubscriber(
+ "Select an image to measure",
+ "None",
+ doc="""
+ What did you call the grayscale images whose texture you want to measure?""",
+ ),
+ )
+ if can_remove:
+ group.append(
+ "remover",
+ cps.do_something.RemoveSettingButton(
+ "", "Remove this image", self.image_groups, group
+ ),
+ )
+ self.image_groups.append(group)
+
+ def add_object_cb(self, can_remove=True):
+ """Add an object to the object_groups collection
+
+        can_remove - set this to False to keep from showing the "remove"
+ button for objects that must be present.
+ """
+ group = cps.SettingsGroup()
+ if can_remove:
+ group.append("divider", cps.Divider(line=False))
+ group.append(
+ "object_name",
+ LabelSubscriber(
+ "Select objects to measure",
+ "None",
+ doc="""
+ What did you call the objects whose texture you want to measure?
+ If you only want to measure the texture
+ for the image overall, you can remove all objects using the "Remove this object" button.
+ Objects specified here will have their
+ texture measured against all images specified above, which
+ may lead to image-object combinations that are unnecessary. If you
+ do not want this behavior, use multiple EnhancedMeasureTexture
+ modules to specify the particular image-object measures that you want.""",
+ ),
+ )
+ if can_remove:
+ group.append(
+ "remover",
+ cps.do_something.RemoveSettingButton(
+ "", "Remove this object", self.object_groups, group
+ ),
+ )
+ self.object_groups.append(group)
+
+ def add_scale_cb(self, can_remove=True):
+ """Add a scale to the scale_groups collection
+
+        can_remove - set this to False to keep from showing the "remove"
+ button for scales that must be present.
+ """
+ group = cps.SettingsGroup()
+ if can_remove:
+ group.append("divider", cps.Divider(line=False))
+ group.append(
+ "scale",
+ Integer(
+ "Texture scale to measure",
+ len(self.scale_groups) + 3,
+ doc="""You can specify the scale of texture to be measured, in pixel units;
+ the texture scale is the distance between correlated intensities in the image. A
+ higher number for the scale of texture measures larger patterns of
+ texture whereas smaller numbers measure more localized patterns of
+ texture. It is best to measure texture on a scale smaller than your
+ objects' sizes, so be sure that the value entered for scale of texture is
+ smaller than most of your objects. For very small objects (smaller than
+ the scale of texture you are measuring), the texture cannot be measured
+          and will result in an undefined value in the output file.""",
+ ),
+ )
+ group.append(
+ "angles",
+ MultiChoice(
+ "Angles to measure",
+ H_ALL,
+ H_ALL,
+ doc="""The Haralick texture measurements are based on the correlation
+ between pixels offset by the scale in one of four directions:
+
+ %(H_HORIZONTAL)s - the correlated pixel is "scale" pixels
+ to the right of the pixel of interest.
+ %(H_VERTICAL)s - the correlated pixel is "scale" pixels
+ below the pixel of interest.
+ %(H_DIAGONAL)s - the correlated pixel is "scale" pixels
+ to the right and "scale" pixels below the pixel of interest.
+ %(H_ANTIDIAGONAL)s - the correlated pixel is "scale"
+ pixels to the left and "scale" pixels below the pixel of interest.
+
+ Choose one or more directions to measure."""
+ % globals(),
+ ),
+ )
+
+ if can_remove:
+ group.append(
+ "remover",
+ cps.do_something.RemoveSettingButton(
+ "", "Remove this scale", self.scale_groups, group
+ ),
+ )
+ self.scale_groups.append(group)
+
+ def validate_module(self, pipeline):
+ """Make sure chosen objects, images and scales are selected only once"""
+ images = set()
+ for group in self.image_groups:
+ if group.image_name.value in images:
+ raise cps.ValidationError(
+ "%s has already been selected" % group.image_name.value,
+ group.image_name,
+ )
+ images.add(group.image_name.value)
+
+ objects = set()
+ for group in self.object_groups:
+ if group.object_name.value in objects:
+ raise cps.ValidationError(
+ "%s has already been selected" % group.object_name.value,
+ group.object_name,
+ )
+ objects.add(group.object_name.value)
+
+ scales = set()
+ for group in self.scale_groups:
+ if group.scale.value in scales:
+ raise cps.ValidationError(
+ "%s has already been selected" % group.scale.value, group.scale
+ )
+ scales.add(group.scale.value)
+
+ def get_categories(self, pipeline, object_name):
+ """Get the measurement categories supplied for the given object name.
+
+ pipeline - pipeline being run
+ object_name - name of labels in question (or 'Images')
+ returns a list of category names
+ """
+ if any([object_name == og.object_name for og in self.object_groups]):
+ return [TEXTURE]
+ elif object_name == "Image":
+ return [TEXTURE]
+ else:
+ return []
+
+ def get_features(self):
+ """Return the feature names for this pipeline's configuration"""
+ return (
+ F_HARALICK
+ + ([F_GABOR] if self.wants_gabor else [])
+ + ([F_TAMURA] if self.wants_tamura else [])
+ )
+
+ def get_measurements(self, pipeline, object_name, category):
+ """Get the measurements made on the given object in the given category
+ pipeline - pipeline being run
+ object_name - name of objects being measured
+ category - measurement category
+ """
+ if category in self.get_categories(pipeline, object_name):
+ return self.get_features()
+ return []
+
+ def get_measurement_images(self, pipeline, object_name, category, measurement):
+ """Get the list of images measured
+ pipeline - pipeline being run
+ object_name - name of objects being measured
+ category - measurement category
+ measurement - measurement made on images
+ """
+ measurements = self.get_measurements(pipeline, object_name, category)
+ if measurement in measurements:
+ return [x.image_name.value for x in self.image_groups]
+ return []
+
+ def get_measurement_scales(
+ self, pipeline, object_name, category, measurement, image_name
+ ):
+ """Get the list of scales at which the measurement was taken
+
+ pipeline - pipeline being run
+ object_name - name of objects being measured
+ category - measurement category
+ measurement - name of measurement made
+ image_name - name of image that was measured
+ """
+ if (
+ len(
+ self.get_measurement_images(
+ pipeline, object_name, category, measurement
+ )
+ )
+ > 0
+ ):
+ if measurement == F_GABOR:
+ return [x.scale.value for x in self.scale_groups]
+ if measurement == F_TAMURA:
+ return []
+ return sum(
+ [
+ [
+ "%d_%s" % (x.scale.value, H_TO_A[h])
+ for h in x.angles.get_selections()
+ ]
+ for x in self.scale_groups
+ ],
+ [],
+ )
+ return []
+
+ def get_measurement_columns(self, pipeline):
+ """Get column names output for each measurement."""
+ cols = []
+ for feature in self.get_features():
+ for im in self.image_groups:
+ if feature == F_TAMURA:
+ for f in F_ALL:
+ cols += [
+ (
+ "Image",
+ "%s_%s_%s_%s"
+ % (TEXTURE, feature, f, im.image_name.value),
+ COLTYPE_FLOAT,
+ )
+ ]
+ for b in range(0, HIST_COARS_BINS):
+ cols += [
+ (
+ "Image",
+ "%s_%s_CoarsenessHist_%dBinsHist_Bin%d_%s"
+ % (
+ TEXTURE,
+ feature,
+ HIST_COARS_BINS,
+ b,
+ im.image_name.value,
+ ),
+ COLTYPE_FLOAT,
+ )
+ ]
+ else:
+ for sg in self.scale_groups:
+ if feature == F_GABOR:
+ cols += [
+ (
+ "Image",
+ "%s_%s_%s_%d"
+ % (
+ TEXTURE,
+ feature,
+ im.image_name.value,
+ sg.scale.value,
+ ),
+ COLTYPE_FLOAT,
+ )
+ ]
+ else:
+ for angle in sg.angles.get_selections():
+ cols += [
+ (
+ "Image",
+ "%s_%s_%s_%d_%s"
+ % (
+ TEXTURE,
+ feature,
+ im.image_name.value,
+ sg.scale.value,
+ H_TO_A[angle],
+ ),
+ COLTYPE_FLOAT,
+ )
+ ]
+
+ for ob in self.object_groups:
+ for feature in self.get_features():
+ for im in self.image_groups:
+ if feature == F_TAMURA:
+ for f in F_ALL:
+ cols += [
+ (
+ ob.object_name.value,
+ "%s_%s_%s_%s"
+ % (TEXTURE, feature, f, im.image_name.value),
+ COLTYPE_FLOAT,
+ )
+ ]
+ for b in range(0, HIST_COARS_BINS):
+ cols += [
+ (
+ "Image",
+ "%s_%s_CoarsenessHist_%dBinsHist_Bin%d_%s"
+ % (
+ TEXTURE,
+ feature,
+ HIST_COARS_BINS,
+ b,
+ im.image_name.value,
+ ),
+ COLTYPE_FLOAT,
+ )
+ ]
+ else:
+ for sg in self.scale_groups:
+ if feature == F_GABOR:
+ cols += [
+ (
+ ob.object_name.value,
+ "%s_%s_%s_%d"
+ % (
+ TEXTURE,
+ feature,
+ im.image_name.value,
+ sg.scale.value,
+ ),
+ COLTYPE_FLOAT,
+ )
+ ]
+ else:
+ for angle in sg.angles.get_selections():
+ cols += [
+ (
+ ob.object_name.value,
+ "%s_%s_%s_%d_%s"
+ % (
+ TEXTURE,
+ feature,
+ im.image_name.value,
+ sg.scale.value,
+ H_TO_A[angle],
+ ),
+ COLTYPE_FLOAT,
+ )
+ ]
+
+ return cols
+
+ def is_interactive(self):
+ return False
+
+ def run(self, workspace):
+ """Run, computing the area measurements for the objects"""
+
+ statistics = [["Image", "Object", "Measurement", "Scale", "Value"]]
+ for image_group in self.image_groups:
+ image_name = image_group.image_name.value
+
+ if self.wants_tamura:
+ statistics += self.run_image_tamura(image_name, workspace)
+ for object_group in self.object_groups:
+ object_name = object_group.object_name.value
+ statistics += self.run_one_tamura(
+ image_name, object_name, workspace
+ )
+
+ for scale_group in self.scale_groups:
+ scale = scale_group.scale.value
+ if self.wants_gabor:
+ statistics += self.run_image_gabor(image_name, scale, workspace)
+ for angle in scale_group.angles.get_selections():
+ statistics += self.run_image(image_name, scale, angle, workspace)
+ for object_group in self.object_groups:
+ object_name = object_group.object_name.value
+ for angle in scale_group.angles.get_selections():
+ statistics += self.run_one(
+ image_name, object_name, scale, angle, workspace
+ )
+ if self.wants_gabor:
+ statistics += self.run_one_gabor(
+ image_name, object_name, scale, workspace
+ )
+        if self.show_window:
+ workspace.display_data.statistics = statistics
+
+ def display(self, workspace):
+ figure = workspace.create_or_find_figure(
+ title="EnhancedMeasureTexture, image cycle #%d"
+ % (workspace.measurements.image_set_number),
+ subplots=(1, 1),
+ )
+ figure.subplot_table(
+ 0,
+ 0,
+ workspace.display_data.statistics,
+ ratio=(0.20, 0.20, 0.20, 0.20, 0.20),
+ )
+
+ def run_one(self, image_name, object_name, scale, angle, workspace):
+ """Run, computing the area measurements for a single map of objects"""
+ statistics = []
+ image = workspace.image_set.get_image(image_name, must_be_grayscale=True)
+ objects = workspace.get_objects(object_name)
+ pixel_data = image.pixel_data
+ if image.has_mask:
+ mask = image.mask
+ else:
+ mask = None
+ labels = objects.segmented
+ try:
+ pixel_data = objects.crop_image_similarly(pixel_data)
+ except ValueError:
+ #
+ # Recover by cropping the image to the labels
+ #
+ pixel_data, m1 = size_similarly(labels, pixel_data)
+ if np.any(~m1):
+ if mask is None:
+ mask = m1
+ else:
+ mask, m2 = size_similarly(labels, mask)
+ mask[~m2] = False
+
+ if np.all(labels == 0):
+ for name in F_HARALICK:
+ statistics += self.record_measurement(
+ workspace,
+ image_name,
+ object_name,
+ str(scale) + "_" + H_TO_A[angle],
+ name,
+ np.zeros((0,)),
+ )
+ else:
+ scale_i, scale_j = self.get_angle_ij(angle, scale)
+
+ for name, value in zip(
+ F_HARALICK,
+ Haralick(pixel_data, labels, scale_i, scale_j, mask=mask).all(),
+ ):
+ statistics += self.record_measurement(
+ workspace,
+ image_name,
+ object_name,
+ str(scale) + "_" + H_TO_A[angle],
+ name,
+ value,
+ )
+ return statistics
+
+ def get_angle_ij(self, angle, scale):
+ if angle == H_VERTICAL:
+ return scale, 0
+ elif angle == H_HORIZONTAL:
+ return 0, scale
+ elif angle == H_DIAGONAL:
+ return scale, scale
+ elif angle == H_ANTIDIAGONAL:
+ return scale, -scale
+
+ def localmean(self, x, y, k, cum_sum):
+ nx = len(cum_sum[0])
+ ny = len(cum_sum)
+
+ hk = k / 2
+ startx = int(max(0, x - hk))
+ starty = int(max(0, y - hk))
+ stopx = int(min(nx - 1, x + hk - 1))
+ stopy = int(min(ny - 1, y + hk - 1))
+
+ if startx == 0:
+ left = 0.0
+ else:
+ left = cum_sum[stopy, startx - 1]
+ if starty == 0:
+ up = 0.0
+ else:
+ up = cum_sum[starty - 1, stopx]
+ if startx == 0 or starty == 0:
+ upleft = 0.0
+ else:
+ upleft = cum_sum[starty - 1, startx - 1]
+
+ down = cum_sum[stopy, stopx]
+ area = (stopy - starty + 1) * (stopx - startx + 1)
+ mean = (down - left - up + upleft) / float(area)
+ return mean
+
+ def fast_local_mean(self, Lk, pixels, cum_sum):
+ """Compute the local mean using the cumulative sum and matrix arithmetic
+
+        Lk - the sampling window (as in the original code: an Lk of 2 is just
+        the pixel itself, an Lk of 4 a 3x3 neighborhood, etc.)
+
+ pixels - the image
+
+ cum_sum - the cumulative sum of the pixels in both directions
+ """
+ if Lk == 2:
+ # This is the value at the pixel, no neighborhood
+ return pixels
+ nx = len(pixels[0])
+ ny = len(pixels)
+ hLk = Lk // 2
+ result = np.zeros(pixels.shape, pixels.dtype)
+ result[hLk : -(hLk - 1), hLk : -(hLk - 1)] = (
+ cum_sum[(Lk - 1) :, (Lk - 1) :]
+ - cum_sum[: -(Lk - 1), (Lk - 1) :]
+ - cum_sum[(Lk - 1) :, : -(Lk - 1)]
+ + cum_sum[: -(Lk - 1), : -(Lk - 1)]
+ ) / ((Lk - 1) * (Lk - 1))
+ for x in list(range(0, hLk)) + list(range(nx - hLk + 1, nx)):
+ for y in range(0, ny):
+ result[y, x] = self.localmean(x, y, Lk, cum_sum)
+ for x in range(hLk, nx - hLk + 1):
+ for y in list(range(0, hLk)) + list(range(ny - hLk, ny)):
+ result[y, x] = self.localmean(x, y, Lk, cum_sum)
+ return result
+
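+    # Equivalence sketch for the integral-image (cumulative-sum) trick used in
+    # fast_local_mean above (illustrative, standalone):
+    #
+    #   px = np.arange(25, dtype=float).reshape(5, 5)
+    #   cs = np.cumsum(np.cumsum(px, 0), 1)
+    #   # sum of the 3x3 block px[1:4, 1:4] from four corner reads:
+    #   block = cs[3, 3] - cs[0, 3] - cs[3, 0] + cs[0, 0]
+    #   assert block == px[1:4, 1:4].sum()
+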
+ def coarseness(self, pixels):
+ nx = len(pixels[0])
+ ny = len(pixels)
+ Ak = np.zeros([NB_SCALES, ny, nx])
+ Ekh = np.zeros([NB_SCALES, ny, nx])
+ Ekv = np.zeros([NB_SCALES, ny, nx])
+ Sbest = np.zeros([ny, nx])
+
+ cum_sum = np.cumsum(np.cumsum(pixels, 0), 1)
+
+ # 1st Step
+ Lk = 1
+ for k in range(0, NB_SCALES):
+ Lk = Lk * 2 # tamura.cpp
+ Ak[k, :, :] = self.fast_local_mean(Lk, pixels, cum_sum)
+
+ # 2nd Step
+ Lk = 1
+ y, x = np.mgrid[0:ny, 0:nx]
+ for k in range(0, NB_SCALES):
+ Lk = Lk * 2 # tamura.cpp
+ x_good = (x + (Lk / 2) < nx) & (x - (Lk / 2) >= 0)
+ x1, y1 = x[x_good], y[x_good]
+ Ekh[k, y1, x1] = np.fabs(
+ Ak[k, y1, x1 + (Lk // 2)] - Ak[k, y1, x1 - (Lk // 2)]
+ )
+ y_good = (y + (Lk / 2) < ny) & (y - (Lk / 2) >= 0)
+ x1, y1 = x[y_good], y[y_good]
+ Ekv[k, y1, x1] = np.fabs(
+ Ak[k, y1 + (Lk // 2), x1] - Ak[k, y1 - (Lk // 2), x1]
+ )
+
+ # 3rd Step
+ # Here, I compare the current k for the x / y grid to
+ # the current best.
+ for k in range(1, NB_SCALES):
+ new_best = Ekh[k, y, x] > Ekh[Sbest.astype(int), y, x]
+ Sbest[new_best] = k
+
+ # As in tamura.cpp: why 32?
+ # Fcoars=np.sum(Sbest)
+ # if nx==32 or ny==32:
+ # Fcoars=Fcoars/((nx+1-32)*(ny+1-32))
+ # else:
+ # Fcoars=Fcoars/((nx-32)*(ny-32))
+
+ # As in paper:
+ Fcoars = np.sum(Sbest) / (nx * ny)
+ hist, junk = np.histogram(Sbest, bins=HIST_COARS_BINS)
+ hist = np.array(hist, dtype=float)
+ hist = hist / max(hist)
+
+ return Fcoars, hist
+
+ def contrast(self, pixels):
+ std = np.std(pixels)
+ kurt = cpmoments.kurtosis(pixels)
+ if std < 0.0000000001:
+ Fcont = 0.0
+ elif kurt <= 0:
+ Fcont = 0.0
+ else:
+ Fcont = std / np.power(kurt, 0.25)
+ return Fcont
+
+ def directionality(self, pixels):
+ nx = len(pixels[0])
+ ny = len(pixels)
+
+ dH = np.array(pixels).copy()
+ dV = np.array(pixels).copy()
+
+ # Prewitt's
+ fH = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
+ fV = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
+
+ # Borders are zeros, just as in convolve2D
+ cH = np.zeros([ny, nx])
+ cV = np.zeros([ny, nx])
+ cH[1 : len(cH) - 1, 1 : len(cH[0]) - 1] = scind.filters.convolve(
+ dH, fH, mode="constant"
+ )[1 : len(cH) - 1, 1 : len(cH[0]) - 1]
+ # sp.convolve2d(dH,fH,mode='valid')
+ cV[1 : len(cV) - 1, 1 : len(cV[0]) - 1] = scind.filters.convolve(
+ dV, fV, mode="constant"
+ )[1 : len(cV) - 1, 1 : len(cV[0]) - 1]
+ # sp.convolve2d(dV,fV,mode='valid')
+
+ # Borders are not only zeros
+ # cH=np.zeros([ny, nx])
+ # cV=np.zeros([ny, nx])
+ # cH=scind.convolve(dH,fH,mode='constant')
+ # cV=scind.convolve(dV,fV,mode='constant')
+
+ theta = np.zeros([ny, nx])
+ rsum = 0.0 # tamura.cpp
+ for y in range(0, ny):
+ for x in range(0, nx):
+ # Version tamura.cpp
+ if cH[y, x] >= 0.0001:
+ theta[y, x] = np.arctan(cV[y, x] / cH[y, x]) + (np.pi / 2.0 + 0.001)
+ rsum = (
+ rsum
+ + (cH[y, x] * cH[y, x])
+ + (cV[y, x] * cV[y, x])
+ + (theta[y, x] * theta[y, x])
+ )
+ else:
+ theta[y, x] = 0.0
+ # Version tamura.m
+ # if cH[y,x]==0 and cV[y,x]==0: theta[y,x]=0.0
+ # elif cH[y,x]==0: theta[y,x]=np.pi
+ # else: theta[y,x]=np.arctan(cV[y,x]/cH[y,x])+(np.pi/2.0)
+
+ # Version tamura.cpp
+ hist, junk = np.histogram(theta, bins=DIR_BINS)
+ bmx = hist.argmax()
+ hsum = 0.0
+ for b in range(0, DIR_BINS):
+ hsum = hsum + (hist[b] * np.power(b + 1 - bmx, 2))
+ Fdir = np.fabs(np.log(hsum / (rsum + 0.0000001)))
+
+ # Version tamura.m
+ # phi=[float(i)/10000 for i in range(0,31416)]
+ # hist, junk=np.histogram(theta, bins=phi)
+ # hist=np.array(hist, dtype=float)
+ # hist=hist/(nx*ny)
+ # hist2=hist.copy()
+ # for b in range(0,len(hist2)):
+ # if hist[b]<0.01: hist2[b]=0
+ # bmx=hist2.argmax()
+ # phiP=bmx*0.0001
+ # Fdir=0.0
+ # for b in range(0,len(hist2)):
+ # Fdir=Fdir+(np.power(phi[b]-phiP,2)*hist2[b])
+
+ return Fdir
+
+ def run_image(self, image_name, scale, angle, workspace):
+ """Run measurements on image"""
+ statistics = []
+ image = workspace.image_set.get_image(image_name, must_be_grayscale=True)
+ pixel_data = image.pixel_data
+ image_labels = np.ones(pixel_data.shape, int)
+ if image.has_mask:
+ image_labels[~image.mask] = 0
+ scale_i, scale_j = self.get_angle_ij(angle, scale)
+ for name, value in zip(
+ F_HARALICK, Haralick(pixel_data, image_labels, scale_i, scale_j).all()
+ ):
+ statistics += self.record_image_measurement(
+ workspace, image_name, str(scale) + "_" + H_TO_A[angle], name, value
+ )
+ return statistics
+
+ def run_image_tamura(self, image_name, workspace):
+ """Run measurements on image"""
+ statistics = []
+ image = workspace.image_set.get_image(image_name, must_be_grayscale=True)
+ pixel_data = image.pixel_data
+ image_labels = np.ones(pixel_data.shape, int)
+ if image.has_mask:
+ image_labels[~image.mask] = 0
+
+ for name, fn in [(F_2, self.contrast), (F_3, self.directionality)]:
+ value = fn(pixel_data)
+ statistics += self.record_image_measurement(
+ workspace, image_name, "-", "%s_%s" % (F_TAMURA, name), value
+ )
+
+ value, hist = self.coarseness(pixel_data)
+ statistics += self.record_image_measurement(
+ workspace, image_name, "-", "%s_%s" % (F_TAMURA, F_1), value
+ )
+
+ for b in range(0, HIST_COARS_BINS):
+ name = "CoarsenessHist_%dBinsHist_Bin%d" % (HIST_COARS_BINS, b)
+ value = hist[b]
+ statistics += self.record_image_measurement(
+ workspace, image_name, "-", "%s_%s" % (F_TAMURA, name), value
+ )
+
+ return statistics
+
+ def run_one_tamura(self, image_name, object_name, workspace):
+ """Run, computing the area measurements for a single map of objects"""
+ statistics = []
+ image = workspace.image_set.get_image(image_name, must_be_grayscale=True)
+ objects = workspace.get_objects(object_name)
+ pixel_data = image.pixel_data
+ if image.has_mask:
+ mask = image.mask
+ else:
+ mask = None
+ labels = objects.segmented
+ try:
+ pixel_data = objects.crop_image_similarly(pixel_data)
+ except ValueError:
+ #
+ # Recover by cropping the image to the labels
+ #
+ pixel_data, m1 = size_similarly(labels, pixel_data)
+ if np.any(~m1):
+ if mask is None:
+ mask = m1
+ else:
+ mask, m2 = size_similarly(labels, mask)
+ mask[~m2] = False
+
+ if np.all(labels == 0):
+ for name in F_ALL:
+ statistics += self.record_measurement(
+ workspace,
+ image_name,
+ object_name,
+ "",
+ "%s_%s" % (F_TAMURA, name),
+ np.zeros((0,)),
+ )
+ else:
+ labs = np.unique(labels)
+ values = np.zeros([np.max(labs) + 1, 2])
+ for l in labs:
+ if l != 0:
+                    px = pixel_data.copy()  # copy so other labels keep their pixel values
+                    px[labels != l] = 0.0
+ values[l, 0] = self.contrast(px)
+ values[l, 1] = self.directionality(px)
+ statistics += self.record_measurement(
+ workspace,
+ image_name,
+ object_name,
+ "-",
+ "%s_%s" % (F_TAMURA, F_2),
+ values[:, 0],
+ )
+ statistics += self.record_measurement(
+ workspace,
+ image_name,
+ object_name,
+ "-",
+ "%s_%s" % (F_TAMURA, F_3),
+ values[:, 1],
+ )
+
+ coars = np.zeros([np.max(labs) + 1])
+ coars_hist = np.zeros([np.max(labs) + 1, HIST_COARS_BINS])
+ for l in labs:
+ if l != 0:
+                    px = pixel_data.copy()  # copy so other labels keep their pixel values
+                    px[labels != l] = 0.0
+ coars[l], coars_hist[l, :] = self.coarseness(px)
+ statistics += self.record_measurement(
+ workspace,
+ image_name,
+ object_name,
+ "-",
+ "%s_%s" % (F_TAMURA, F_1),
+ coars,
+ )
+ for b in range(0, HIST_COARS_BINS):
+ value = coars_hist[1:, b]
+ name = "CoarsenessHist_%dBinsHist_Bin%d" % (HIST_COARS_BINS, b)
+ statistics += self.record_measurement(
+ workspace,
+ image_name,
+ object_name,
+ "-",
+ "%s_%s" % (F_TAMURA, name),
+ value,
+ )
+
+ return statistics
+
+ def run_one_gabor(self, image_name, object_name, scale, workspace):
+ objects = workspace.get_objects(object_name)
+ labels = objects.segmented
+ object_count = np.max(labels)
+ if object_count > 0:
+ image = workspace.image_set.get_image(image_name, must_be_grayscale=True)
+ pixel_data = image.pixel_data
+ labels = objects.segmented
+ if image.has_mask:
+ mask = image.mask
+ else:
+ mask = None
+ try:
+ pixel_data = objects.crop_image_similarly(pixel_data)
+ if mask is not None:
+ mask = objects.crop_image_similarly(mask)
+ labels[~mask] = 0
+ except ValueError:
+ pixel_data, m1 = size_similarly(labels, pixel_data)
+ labels[~m1] = 0
+ if mask is not None:
+ mask, m2 = size_similarly(labels, mask)
+ labels[~m2] = 0
+ labels[~mask] = 0
+ pixel_data = normalized_per_object(pixel_data, labels)
+ best_score = np.zeros((object_count,))
+ for angle in range(self.gabor_angles.value):
+ theta = np.pi * angle / self.gabor_angles.value
+ g = gabor(pixel_data, labels, scale, theta)
+ score_r = fix(
+ scind.sum(
+ g.real, labels, np.arange(object_count, dtype=np.int32) + 1
+ )
+ )
+ score_i = fix(
+ scind.sum(
+ g.imag, labels, np.arange(object_count, dtype=np.int32) + 1
+ )
+ )
+ score = np.sqrt(score_r**2 + score_i**2)
+ best_score = np.maximum(best_score, score)
+ else:
+ best_score = np.zeros((0,))
+ statistics = self.record_measurement(
+ workspace, image_name, object_name, scale, F_GABOR, best_score
+ )
+ return statistics
+
+ def run_image_gabor(self, image_name, scale, workspace):
+ image = workspace.image_set.get_image(image_name, must_be_grayscale=True)
+ pixel_data = image.pixel_data
+ labels = np.ones(pixel_data.shape, int)
+ if image.has_mask:
+ labels[~image.mask] = 0
+ pixel_data = stretch(pixel_data, labels > 0)
+ best_score = 0
+ for angle in range(self.gabor_angles.value):
+ theta = np.pi * angle / self.gabor_angles.value
+ g = gabor(pixel_data, labels, scale, theta)
+ score_r = np.sum(g.real)
+ score_i = np.sum(g.imag)
+ score = np.sqrt(score_r**2 + score_i**2)
+ best_score = max(best_score, score)
+ statistics = self.record_image_measurement(
+ workspace, image_name, scale, F_GABOR, best_score
+ )
+ return statistics
+
+ def record_measurement(
+ self, workspace, image_name, object_name, scale, feature_name, result
+ ):
+ """Record the result of a measurement in the workspace's
+ measurements"""
+ data = fix(result)
+ data[~np.isfinite(data)] = 0
+
+ if scale == "-":
+ workspace.add_measurement(
+ object_name, "%s_%s_%s" % (TEXTURE, feature_name, image_name), data
+ )
+ statistics = [
+ [
+ image_name,
+ object_name,
+ feature_name,
+ scale,
+ "%f" % (d) if len(data) else "-",
+ ]
+ for d in data
+ ]
+ else:
+ workspace.add_measurement(
+ object_name,
+ "%s_%s_%s_%s" % (TEXTURE, feature_name, image_name, str(scale)),
+ data,
+ )
+ statistics = [
+ [
+ image_name,
+ object_name,
+ "%s %s" % (aggregate_name, feature_name),
+ scale,
+ "%.2f" % fn(data) if len(data) else "-",
+ ]
+ for aggregate_name, fn in (
+ ("min", np.min),
+ ("max", np.max),
+ ("mean", np.mean),
+ ("median", np.median),
+ ("std dev", np.std),
+ )
+ ]
+ return statistics
+
+ def record_image_measurement(
+ self, workspace, image_name, scale, feature_name, result
+ ):
+ """Record the result of a measurement in the workspace's
+ measurements"""
+ if not np.isfinite(result):
+ result = 0
+
+ if scale == "-":
+ workspace.measurements.add_image_measurement(
+ "%s_%s_%s" % (TEXTURE, feature_name, image_name), result
+ )
+ else:
+ workspace.measurements.add_image_measurement(
+ "%s_%s_%s_%s" % (TEXTURE, feature_name, image_name, str(scale)), result
+ )
+ statistics = [[image_name, "-", feature_name, scale, "%.2f" % (result)]]
+ return statistics
+
+ def upgrade_settings(self, setting_values, variable_revision_number, module_name):
+ """Adjust the setting_values for older save file versions
+
+ setting_values - a list of strings representing the settings for
+ this module.
+ variable_revision_number - the variable revision number of the module
+ that saved the settings
+ module_name - the name of the module that saved the settings
+
+ returns the modified settings, revision number
+ """
+ if variable_revision_number == 1:
+ #
+ # Added "wants_gabor"
+ #
+ setting_values = setting_values[:-1] + ["Yes"] + setting_values[-1:]
+ variable_revision_number = 2
+ if variable_revision_number == 2:
+ #
+ # Added angles
+ #
+ image_count = int(setting_values[0])
+ object_count = int(setting_values[1])
+ scale_count = int(setting_values[2])
+ scale_offset = 3 + image_count + object_count
+ new_setting_values = setting_values[:scale_offset]
+ for scale in setting_values[scale_offset : (scale_offset + scale_count)]:
+ new_setting_values += [scale, H_HORIZONTAL]
+ new_setting_values += setting_values[(scale_offset + scale_count) :]
+ setting_values = new_setting_values
+ variable_revision_number = 3
+
+ return setting_values, variable_revision_number
diff --git a/CP5/active_plugins/histogramequalization.py b/CP5/active_plugins/histogramequalization.py
new file mode 100644
index 00000000..817eb506
--- /dev/null
+++ b/CP5/active_plugins/histogramequalization.py
@@ -0,0 +1,231 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+import numpy
+import skimage.exposure
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+import cellprofiler_core.image
+import cellprofiler_core.module
+import cellprofiler_core.setting
+import cellprofiler_core.setting.text
+from cellprofiler_core.setting.subscriber import ImageSubscriber
+
+__doc__ = """\
+HistogramEqualization
+=====================
+**HistogramEqualization** increases the global contrast of
+a low-contrast image or volume. Histogram equalization redistributes pixel intensities
+to utilize the full intensity range, spreading out the most frequent intensity values
+so that they become more distinct.
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES YES YES
+============ ============ ===============
+
+Technical notes
+^^^^^^^^^^^^^^^
+This module can perform two types of histogram equalization: a global method (HE) and
+a local method (Adaptive Histogram Equalization - AHE). The local method may perform
+better in some cases, but it can also amplify background noise. The clipping limit
+setting can help limit noise amplification (Contrast Limited AHE - CLAHE).
+See the references for more information.
+
+References
+^^^^^^^^^^
+(`link `__)
+(`link `__)
+"""
+
+
+class HistogramEqualization(cellprofiler_core.module.ImageProcessing):
+ module_name = "HistogramEqualization"
+
+ variable_revision_number = 1
+
+ def create_settings(self):
+ super(HistogramEqualization, self).create_settings()
+
+ self.nbins = cellprofiler_core.setting.text.Integer(
+ "Bins", value=256, minval=0, doc="Number of bins for image histogram."
+ )
+
+ self.tile_size = cellprofiler_core.setting.text.Integer(
+ "Tile Size",
+ value=50,
+ minval=1,
+ doc="""The image is partitioned into tiles of the specified size. Choose a tile size that will fit at least one object of interest.
+ """,
+ )
+
+ self.mask = ImageSubscriber(
+ "Mask",
+ can_be_blank=True,
+ doc="""
+ Optional. Mask image must be the same size as "Input". Only unmasked points of the "Input" image are used
+ to compute the equalization, which is applied to the entire "Input" image.
+ """,
+ )
+
+ self.local = cellprofiler_core.setting.Binary("Local", False)
+
+ self.clip_limit = cellprofiler_core.setting.text.Float(
+ "Clip limit",
+ value=0.01,
+ minval=0,
+ maxval=1,
+ doc="""Normalized between 0 and 1. Higher values give more contrast but will also result in over-amplification of background in areas of low or no signal.
+ """,
+ )
+
+ self.do_3D = cellprofiler_core.setting.Binary(
+ text="Is your image 3D?",
+ value=False,
+ doc="""
+ If enabled, 3D specific settings will be available.""",
+ )
+
+ self.do_framewise = cellprofiler_core.setting.Binary(
+ text="Do framewise calculation?",
+ value=False,
+ doc="""
+ If enabled, the histogram equalization will be calculated frame-wise instead of using the image volume""",
+ )
+
+ self.tile_z_size = cellprofiler_core.setting.text.Integer(
+ "Tile Size (Z)",
+ value=5,
+ minval=1,
+ doc="""For 3D image you have the option of performing histogram equalization one z-frame at a time or using a 3D tile
+ """,
+ )
+
+ def settings(self):
+ __settings__ = super(HistogramEqualization, self).settings()
+
+ return __settings__ + [
+ self.nbins,
+ self.mask,
+ self.local,
+ self.tile_size,
+ self.clip_limit,
+ self.do_3D,
+ self.do_framewise,
+ self.tile_z_size,
+ ]
+
+ def visible_settings(self):
+ __settings__ = super(HistogramEqualization, self).settings()
+
+ __settings__ += [self.local, self.nbins, self.do_3D]
+
+ if not self.local.value:
+ __settings__ += [self.mask]
+ if self.do_3D.value:
+ __settings__ += [self.do_framewise]
+ else:
+ __settings__ += [
+ self.clip_limit,
+ self.tile_size,
+ ]
+ if self.do_3D.value:
+ __settings__ += [self.do_framewise]
+ if not self.do_framewise.value:
+ __settings__ += [self.tile_z_size]
+ return __settings__
+
+ def run(self, workspace):
+ x_name = self.x_name.value
+
+ y_name = self.y_name.value
+
+ images = workspace.image_set
+
+ x = images.get_image(x_name)
+
+ dimensions = x.dimensions
+
+ x_data = x.pixel_data
+
+ mask_data = None
+
+ if not self.mask.is_blank:
+ mask_name = self.mask.value
+
+ mask = images.get_image(mask_name)
+
+ mask_data = mask.pixel_data
+
+ nbins = self.nbins.value
+
+ if self.local.value:
+
+ kernel_size = self.tile_size.value
+ clip_limit = self.clip_limit.value
+
+ if self.do_3D.value:
+                y_data = numpy.zeros_like(x_data, dtype=float)
+ if self.do_framewise.value:
+ for index, plane in enumerate(x_data):
+ y_data[index] = skimage.exposure.equalize_adapthist(
+ plane,
+ kernel_size=kernel_size,
+ nbins=nbins,
+ clip_limit=clip_limit,
+ )
+ else:
+ kernel_size = (
+ self.tile_z_size.value,
+ self.tile_size.value,
+ self.tile_size.value,
+ )
+ y_data = skimage.exposure.equalize_adapthist(
+ x_data,
+ kernel_size=kernel_size,
+ nbins=nbins,
+ clip_limit=clip_limit,
+ )
+ else:
+ y_data = skimage.exposure.equalize_adapthist(
+ x_data, kernel_size=kernel_size, nbins=nbins, clip_limit=clip_limit
+ )
+ else:
+ if self.do_3D.value:
+                y_data = numpy.zeros_like(x_data, dtype=float)
+ if self.do_framewise.value:
+ for index, plane in enumerate(x_data):
+ y_data[index] = skimage.exposure.equalize_hist(
+ plane, nbins=nbins, mask=mask_data
+ )
+ else:
+ y_data = skimage.exposure.equalize_hist(
+ x_data, nbins=nbins, mask=mask_data
+ )
+ else:
+ y_data = skimage.exposure.equalize_hist(
+ x_data, nbins=nbins, mask=mask_data
+ )
+
+ y = cellprofiler_core.image.Image(
+ dimensions=dimensions, image=y_data, parent_image=x
+ )
+
+ images.add(y_name, y)
+
+ if self.show_window:
+ workspace.display_data.x_data = x_data
+
+ workspace.display_data.y_data = y_data
+
+ workspace.display_data.dimensions = dimensions
diff --git a/CP5/active_plugins/histogrammatching.py b/CP5/active_plugins/histogrammatching.py
new file mode 100644
index 00000000..5c958fcc
--- /dev/null
+++ b/CP5/active_plugins/histogrammatching.py
@@ -0,0 +1,144 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+import numpy
+import skimage.exposure
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+import cellprofiler_core.image
+import cellprofiler_core.module
+import cellprofiler_core.setting
+import cellprofiler_core.setting.text
+from cellprofiler_core.setting.subscriber import ImageSubscriber
+
+__doc__ = """\
+HistogramMatching
+=================
+**HistogramMatching** manipulates the pixel intensity values of an input image and matches
+them to the histogram of a reference image. It can be used as a way to normalize intensities
+across different images or different frames of the same image. It allows you to choose
+which frame to use as the reference.
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES YES NO
+============ ============ ===============
+
+References
+^^^^^^^^^^
+(`link `__)
+(`link `__)
+"""
+
+
+class HistogramMatching(cellprofiler_core.module.ImageProcessing):
+ module_name = "HistogramMatching"
+
+ variable_revision_number = 1
+
+ def create_settings(self):
+ super(HistogramMatching, self).create_settings()
+
+ self.reference_image = ImageSubscriber(
+ "Image to use as reference ",
+ doc="Select the image you want to use the reference.",
+ )
+
+ self.do_3D = cellprofiler_core.setting.Binary(
+ text="Is your image 3D?",
+ value=False,
+ doc="""
+ If enabled, 3D specific settings are available.""",
+ )
+
+ self.do_self_reference = cellprofiler_core.setting.Binary(
+ text="Use a frame within image as reference?",
+ value=False,
+ doc="""
+ If enabled, a frame within the 3D image is used as the reference image.""",
+ )
+
+ self.frame_number = cellprofiler_core.setting.text.Integer(
+ "Frame number",
+ value=5,
+ minval=1,
+ doc="""For 3D images, you have the option of performing histogram matching within the image using one of the frames in the image
+ """,
+ )
+
+ def settings(self):
+ __settings__ = super(HistogramMatching, self).settings()
+
+ return __settings__ + [
+ self.do_3D,
+ self.do_self_reference,
+ self.reference_image,
+ self.frame_number,
+ ]
+
+ def visible_settings(self):
+ __settings__ = super(HistogramMatching, self).settings()
+
+ __settings__ += [self.do_3D, self.reference_image]
+
+ if self.do_3D.value:
+ __settings__ += [self.do_self_reference]
+
+ if self.do_self_reference.value:
+ __settings__.remove(self.reference_image)
+ __settings__ += [self.frame_number]
+
+ return __settings__
+
+ def run(self, workspace):
+ x_name = self.x_name.value
+
+ y_name = self.y_name.value
+
+ images = workspace.image_set
+
+ x = images.get_image(x_name)
+
+ dimensions = x.dimensions
+
+ x_data = x.pixel_data
+
+ if x.volumetric:
+            y_data = numpy.zeros_like(x_data, dtype=float)
+
+ if self.do_self_reference.value:
+ reference_image = x_data[self.frame_number.value]
+ for index, plane in enumerate(x_data):
+ y_data[index] = skimage.exposure.match_histograms(
+ plane, reference_image
+ )
+ else:
+                reference_image = images.get_image(
+                    self.reference_image.value
+                ).pixel_data
+                for index, plane in enumerate(x_data):
+                    y_data[index] = skimage.exposure.match_histograms(
+                        plane, reference_image
+                    )
+ else:
+            reference_image = images.get_image(self.reference_image.value).pixel_data
+ y_data = skimage.exposure.match_histograms(x_data, reference_image)
+
+ y = cellprofiler_core.image.Image(
+ dimensions=dimensions, image=y_data, parent_image=x
+ )
+
+ images.add(y_name, y)
+
+ if self.show_window:
+ workspace.display_data.x_data = x_data
+
+ workspace.display_data.y_data = y_data
+
+ workspace.display_data.dimensions = dimensions
diff --git a/CP5/active_plugins/pixelshuffle.py b/CP5/active_plugins/pixelshuffle.py
new file mode 100644
index 00000000..2d93cae0
--- /dev/null
+++ b/CP5/active_plugins/pixelshuffle.py
@@ -0,0 +1,92 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import numpy
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+import cellprofiler_core.image
+import cellprofiler_core.module
+import cellprofiler_core.setting
+
+__doc__ = """\
+PixelShuffle
+============
+
+**PixelShuffle** takes the intensity of each pixel in an image and randomly shuffles the pixel positions.
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES NO NO
+============ ============ ===============
+
+"""
+
+
+class PixelShuffle(cellprofiler_core.module.ImageProcessing):
+ module_name = "PixelShuffle"
+
+ variable_revision_number = 1
+
+ def settings(self):
+ __settings__ = super(PixelShuffle, self).settings()
+ return __settings__
+
+ def visible_settings(self):
+ """Return the settings as displayed to the user"""
+ __settings__ = super(PixelShuffle, self).settings()
+ return __settings__
+
+ def run(self, workspace):
+ x_name = self.x_name.value
+
+ y_name = self.y_name.value
+
+ images = workspace.image_set
+
+ x = images.get_image(x_name)
+
+ dimensions = x.dimensions
+
+ x_data = x.pixel_data
+
+        width, height = x_data.shape[:2]
+
+        # Flatten the spatial grid, shuffle the pixel order, and restore the
+        # original shape. Any trailing (e.g. channel) axes stay attached to
+        # their pixel.
+        pixels = x_data.reshape(width * height, *x_data.shape[2:])
+        shuffled = pixels[numpy.random.permutation(width * height)]
+        out = shuffled.reshape(x_data.shape)
+
+ y_data = out
+
+ y = cellprofiler_core.image.Image(
+ dimensions=dimensions, image=y_data, parent_image=x
+ )
+
+ images.add(y_name, y)
+
+ if self.show_window:
+ workspace.display_data.x_data = x_data
+ workspace.display_data.y_data = y_data
+ workspace.display_data.dimensions = dimensions
diff --git a/CP5/active_plugins/predict.py b/CP5/active_plugins/predict.py
new file mode 100755
index 00000000..03297336
--- /dev/null
+++ b/CP5/active_plugins/predict.py
@@ -0,0 +1,177 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import os
+import subprocess
+import tempfile
+import h5py # HDF5 is ilastik's preferred file format
+import logging
+import skimage
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+from cellprofiler_core.image import Image
+from cellprofiler_core.module import Module
+import cellprofiler_core.setting
+from cellprofiler_core.setting.choice import Choice
+from cellprofiler_core.setting.text import Pathname
+
+__doc__ = """\
+Predict
+=======
+
+**Predict** uses an ilastik pixel classifier to generate a probability image. Each
+channel represents the probability that the pixels in the image belong to
+a particular class. Use **ColorToGray** to separate channels for further
+processing. For example, use **IdentifyPrimaryObjects** on a
+(single-channel) probability map to generate a segmentation. The order
+of the channels in **ColorToGray** is the same as the order of the
+labels within the ilastik project.
+
+Additionally, please ensure CellProfiler is configured to load images in
+the same format as ilastik. For example, if your ilastik classifier is
+trained on RGB images, use **NamesAndTypes** to load images as RGB by
+selecting "*Color image*" from the *Select the image type* dropdown. If
+your classifier expects grayscale images, use **NamesAndTypes** to load
+images as "*Grayscale image*".
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES NO NO
+============ ============ ===============
+"""
+
+logger = logging.getLogger(__name__)
+
+
+class Predict(cellprofiler_core.module.ImageProcessing):
+ module_name = "Predict"
+
+ variable_revision_number = 2
+
+ def create_settings(self):
+ super(Predict, self).create_settings()
+
+ self.executable = Pathname(
+ "Executable",
+ doc="ilastik command line executable name, or location if it is not on your path.",
+ )
+
+ self.project_file = Pathname(
+ "Project file", doc="Path to the project file (\*.ilp)."
+ )
+
+ self.project_type = Choice(
+ "Select the project type",
+ ["Pixel Classification", "Autocontext (2-stage)"],
+ "Pixel Classification",
+ doc="""\
+Select the project type which matches the project file specified by
+*Project file*. CellProfiler supports two types of ilastik projects:
+
+- *Pixel Classification*: Classify the pixels of an image given user
+ annotations. `Read more`_.
+
+- *Autocontext (2-stage)*: Perform pixel classification in multiple
+ stages, sharing predictions between stages to improve results. `Read
+ more `__.
+
+.. _Read more: http://ilastik.org/documentation/pixelclassification/pixelclassification
+""",
+ )
+
+ def settings(self):
+ settings = super(Predict, self).settings()
+
+ settings += [self.executable, self.project_file, self.project_type]
+
+ return settings
+
+ def visible_settings(self):
+ visible_settings = super(Predict, self).visible_settings()
+
+ visible_settings += [self.executable, self.project_file, self.project_type]
+
+ return visible_settings
+
+ def run(self, workspace):
+ image = workspace.image_set.get_image(self.x_name.value)
+
+ x_data = image.pixel_data
+        x_data = x_data * image.scale
+
+ fin = tempfile.NamedTemporaryFile(suffix=".h5", delete=False)
+
+ fout = tempfile.NamedTemporaryFile(suffix=".h5", delete=False)
+
+        if self.executable.value.endswith(".app"):
+ executable = os.path.join(self.executable.value, "Contents/MacOS/ilastik")
+ else:
+ executable = self.executable.value
+
+ cmd = [
+ executable,
+ "--headless",
+ "--project",
+ self.project_file.value,
+ "--output_format",
+ "hdf5",
+ ]
+
+        if self.project_type.value == "Pixel Classification":
+            cmd += ["--export_source", "Probabilities"]
+        elif self.project_type.value == "Autocontext (2-stage)":
+
+ cmd += ["--export_source", "probabilities stage 2"]
+ # cmd += ["--export_source", "probabilities all stages"]
+
+ cmd += ["--output_filename_format", fout.name, fin.name]
+
+ try:
+ with h5py.File(fin.name, "w") as f:
+ shape = x_data.shape
+
+ f.create_dataset("data", shape, data=x_data)
+
+ fin.close()
+
+ fout.close()
+
+ subprocess.check_call(cmd)
+
+ with h5py.File(fout.name, "r") as f:
+ y_data = f["exported_data"][()]
+
+ y = Image(y_data)
+
+ workspace.image_set.add(self.y_name.value, y)
+
+ if self.show_window:
+ workspace.display_data.x_data = x_data
+
+ workspace.display_data.y_data = y_data
+
+ workspace.display_data.dimensions = image.dimensions
+ except subprocess.CalledProcessError as cpe:
+            logger.error(
+                "Command %s exited with status %s", cpe.cmd, cpe.returncode
+            )
+
+ raise cpe
+ except IOError as ioe:
+ raise ioe
+ finally:
+ os.unlink(fin.name)
+
+ os.unlink(fout.name)
diff --git a/CP5/active_plugins/runcellpose.py b/CP5/active_plugins/runcellpose.py
new file mode 100644
index 00000000..9c1ac322
--- /dev/null
+++ b/CP5/active_plugins/runcellpose.py
@@ -0,0 +1,702 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import numpy
+import os
+import skimage
+import importlib.metadata
+import subprocess
+import uuid
+import shutil
+import logging
+import sys
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+from cellprofiler_core.image import Image
+from cellprofiler_core.module.image_segmentation import ImageSegmentation
+from cellprofiler_core.object import Objects
+from cellprofiler_core.setting import Binary, ValidationError
+from cellprofiler_core.setting.choice import Choice
+from cellprofiler_core.setting.do_something import DoSomething
+from cellprofiler_core.setting.subscriber import ImageSubscriber
+from cellprofiler_core.preferences import get_default_output_directory
+from cellprofiler_core.setting.text import (
+ Integer,
+ ImageName,
+ Directory,
+ Filename,
+ Float,
+)
+
+CUDA_LINK = "https://pytorch.org/get-started/locally/"
+Cellpose_link = " https://doi.org/10.1038/s41592-020-01018-x"
+Omnipose_link = "https://doi.org/10.1101/2021.11.03.467199"
+LOGGER = logging.getLogger(__name__)
+
+__doc__ = f"""\
+RunCellpose
+===========
+
+**RunCellpose** uses a pre-trained machine learning model (Cellpose) to detect cells or nuclei in an image.
+
+This module is useful for automating simple segmentation tasks in CellProfiler.
+The module accepts greyscale input images and produces an object set. Probabilities can also be captured as an image.
+
+Loading in a model will take slightly longer the first time you run it each session. When evaluating
+performance you may want to consider the time taken to predict subsequent images.
+
+This module now also supports Omnipose. Omnipose builds on Cellpose; for the purposes of **RunCellpose** it adds two
+features: additional models (bact-omni and cyto2-omni, which were trained using the Omnipose architecture) and the
+Omnipose mask reconstruction algorithm, which was created to solve over-segmentation of large cells. This is useful for bacterial cells,
+but can be used for other arbitrary and anisotropic shapes. You can mix and match Omnipose models with Cellpose-style masking or vice versa.
+
+The module has been updated to be compatible with the latest release of Cellpose. The 'cells' model from the old version of the module corresponds to the 'cyto2' model.
+
+Installation:
+
+You must have Cellpose version >= 1.0.2 installed.
+
+Run `pip install cellpose` in your CellProfiler Python environment to set up Cellpose. If you have an older version of Cellpose,
+run `python -m pip install cellpose --upgrade`.
+
+To use the Omnipose models and mask reconstruction method, install Omnipose (`pip install omnipose`) and Cellpose version 1.0.2 (`pip install cellpose==1.0.2`).
+
+The first time you use it in CellProfiler, Cellpose will need to download some model files from the internet. This
+may take some time. If you want to use a GPU to run the model, you'll need a compatible version of PyTorch and a
+supported GPU. Instructions are available at this link: {CUDA_LINK}
+
+Stringer, C., Wang, T., Michaelos, M. et al. Cellpose: a generalist algorithm for cellular segmentation. Nat Methods 18, 100–106 (2021). {Cellpose_link}
+Kevin J. Cutler, Carsen Stringer, Paul A. Wiggins, Joseph D. Mougous. Omnipose: a high-precision morphology-independent solution for bacterial cell segmentation. bioRxiv 2021.11.03.467199. {Omnipose_link}
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES YES NO
+============ ============ ===============
+
+"""
+
+CELLPOSE_DOCKER_NO_PRETRAINED = "cellprofiler/runcellpose_no_pretrained:0.1"
+CELLPOSE_DOCKER_IMAGE_WITH_PRETRAINED = "cellprofiler/runcellpose_with_pretrained:0.1"
+
+MODEL_NAMES = ['cyto','nuclei','tissuenet','livecell', 'cyto2', 'general',
+ 'CP', 'CPx', 'TN1', 'TN2', 'TN3', 'LC1', 'LC2', 'LC3', 'LC4', 'custom']
+
+
+class RunCellpose(ImageSegmentation):
+ category = "Object Processing"
+
+ module_name = "RunCellpose"
+
+ variable_revision_number = 4
+
+ doi = {
+ "Please cite the following when using RunCellPose:": "https://doi.org/10.1038/s41592-020-01018-x",
+ "If you are using Omnipose also cite the following:": "https://doi.org/10.1101/2021.11.03.467199",
+ }
+
+ def create_settings(self):
+ super(RunCellpose, self).create_settings()
+
+ self.docker_or_python = Choice(
+ text="Run CellPose in docker or local python environment",
+ choices=["Docker", "Python"],
+ value="Docker",
+ doc="""\
+If Docker is selected, ensure that Docker Desktop is open and running on your
+computer. On first run of the RunCellpose plugin, the Docker container will be
+downloaded. However, this slow downloading process will only have to happen
+once.
+
+If Python is selected, the Python environment in which CellProfiler and Cellpose
+are installed will be used.
+""",
+ )
+
+ self.docker_image = Choice(
+ text="Select Cellpose docker image",
+ choices=[CELLPOSE_DOCKER_IMAGE_WITH_PRETRAINED, CELLPOSE_DOCKER_NO_PRETRAINED],
+ value=CELLPOSE_DOCKER_IMAGE_WITH_PRETRAINED,
+ doc="""\
+Select which Docker image to use for running Cellpose.
+
+If you are not using a custom model, you can select
+**"{CELLPOSE_DOCKER_IMAGE_WITH_PRETRAINED}"**. If you are using a custom model,
+you can use either **"{CELLPOSE_DOCKER_NO_PRETRAINED}"** or
+**"{CELLPOSE_DOCKER_IMAGE_WITH_PRETRAINED}"**, but the latter will be slightly
+larger (~500 MB) due to including all of the pretrained models.
+""".format(
+ **{"CELLPOSE_DOCKER_NO_PRETRAINED": CELLPOSE_DOCKER_NO_PRETRAINED,
+ "CELLPOSE_DOCKER_IMAGE_WITH_PRETRAINED": CELLPOSE_DOCKER_IMAGE_WITH_PRETRAINED}
+),
+ )
+
+ self.expected_diameter = Integer(
+ text="Expected object diameter",
+ value=30,
+ minval=0,
+ doc="""\
+The average diameter of the objects to be detected. Setting this to 0 will attempt to automatically detect object size.
+Note that automatic diameter mode does not work when running on 3D images.
+
+Cellpose models come with a pre-defined object diameter. Your image will be resized during detection to attempt to
+match the diameter expected by the model. The default models have an expected diameter of ~16 pixels; if trying to
+detect much smaller objects it may be more efficient to resize the image first using the Resize module.
+""",
+ )
+
+ self.mode = Choice(
+ text="Detection mode",
+ choices=MODEL_NAMES,
+ value=MODEL_NAMES[0],
+ doc="""\
+CellPose comes with models for detecting nuclei or cells. Alternatively, you can supply a custom-trained model
+generated using the command line or Cellpose GUI. Custom models can be useful if working with unusual cell types.
+""",
+ )
+
+ self.omni = Binary(
+ text="Use Omnipose for mask reconstruction",
+ value=False,
+ doc="""\
+If enabled, the Omnipose mask reconstruction features will be used (requires an Omnipose installation and Cellpose >= 1.0).""",
+ )
+
+ self.do_3D = Binary(
+ text="Use 3D",
+ value=False,
+ doc="""\
+If enabled, 3D specific settings will be available.""",
+ )
+
+ self.use_gpu = Binary(
+ text="Use GPU",
+ value=False,
+ doc=f"""\
+If enabled, Cellpose will attempt to run detection on your system's graphics card (GPU).
+Note that you will need a CUDA-compatible GPU and correctly configured PyTorch version, see this link for details:
+{CUDA_LINK}
+
+If disabled or incorrectly configured, Cellpose will run on your CPU instead. This is much slower but more compatible
+with different hardware setups.
+
+Note that, particularly when in 3D mode, lack of GPU memory can become a limitation. If a model crashes you may need to
+re-start CellProfiler to release GPU memory. Resizing large images prior to running them through the model can free up
+GPU memory.
+""",
+ )
+
+ self.use_averaging = Binary(
+ text="Use averaging",
+ value=False,
+ doc="""\
+If enabled, CellPose will run its four inbuilt models and take a consensus to determine the results. If disabled, only a
+single model will be called to produce results. Disabling averaging is faster to run but less accurate.""",
+ )
+
+ self.invert = Binary(
+ text="Invert images",
+ value=False,
+ doc="""\
+If enabled, the image will be inverted and normalized. Use this for fluorescence images with the bact model (the bact model was trained on phase-contrast images).""",
+ )
+
+ self.supply_nuclei = Binary(
+ text="Supply nuclei image as well?",
+ value=False,
+ doc="""
+When detecting whole cells, you can provide a second image featuring a nuclear stain to assist
+the model with segmentation. This can help to split touching cells.""",
+ )
+
+ self.nuclei_image = ImageSubscriber(
+ "Select the nuclei image",
+ doc="Select the image you want to use as the nuclear stain.",
+ )
+
+ self.save_probabilities = Binary(
+ text="Save probability image?",
+ value=False,
+ doc="""
+If enabled, the probability scores from the model will be recorded as a new image.
+Probability >0 is considered as being part of a cell.
+You may want to use a higher threshold to manually generate objects.""",
+ )
+
+ self.probabilities_name = ImageName(
+ "Name the probability image",
+ "Probabilities",
+ doc="Enter the name you want to call the probability image produced by this module.",
+ )
+
+ self.model_directory = Directory(
+ "Location of the pre-trained model file",
+ doc=f"""\
+*(Used only when using a custom pre-trained model)*
+Select the location of the pre-trained CellPose model file that will be used for detection.""",
+ )
+
+ def get_directory_fn():
+ """Get the directory for the rules file name"""
+ return self.model_directory.get_absolute_path()
+
+ def set_directory_fn(path):
+ dir_choice, custom_path = self.model_directory.get_parts_from_path(path)
+
+ self.model_directory.join_parts(dir_choice, custom_path)
+
+ self.model_file_name = Filename(
+ "Pre-trained model file name",
+ "cyto_0",
+ get_directory_fn=get_directory_fn,
+ set_directory_fn=set_directory_fn,
+ doc=f"""\
+*(Used only when using a custom pre-trained model)*
+This file can be generated by training a custom model within the CellPose GUI or command line applications.""",
+ )
+
+ self.gpu_test = DoSomething(
+ "",
+ "Test GPU",
+ self.do_check_gpu,
+ doc=f"""\
+Press this button to check whether a GPU is correctly configured.
+
+If you have a dedicated GPU, a failed test usually means that either your GPU does not support deep learning or the
+required dependencies are not installed.
+If you have multiple GPUs on your system, this button will only test the first one.
+""",
+ )
+
+ self.flow_threshold = Float(
+ text="Flow threshold",
+ value=0.4,
+ minval=0,
+ doc="""\
+The flow_threshold parameter is the maximum allowed error of the flows for each mask. The default is flow_threshold=0.4.
+Increase this threshold if cellpose is not returning as many masks as you’d expect.
+Similarly, decrease this threshold if cellpose is returning too many ill-shaped masks
+""",
+ )
+
+ self.cellprob_threshold = Float(
+ text="Cell probability threshold",
+ value=0.0,
+ minval=-6.0,
+ maxval=6.0,
+ doc=f"""\
+Cell probability threshold (all pixels with probability above threshold kept for masks). Recommended default is 0.0.
+Values vary from -6 to 6
+""",
+ )
+
+ self.manual_GPU_memory_share = Float(
+ text="GPU memory share for each worker",
+ value=0.1,
+ minval=0.0000001,
+ maxval=1,
+ doc="""\
+Fraction of the GPU memory share available to each worker. Value should be set such that this number times the number
+of workers in each copy of CellProfiler times the number of copies of CellProfiler running (if applicable) is <1
+""",
+ )
+
+ self.stitch_threshold = Float(
+ text="Stitch Threshold",
+ value=0.0,
+ minval=0,
+ doc=f"""\
+There may be additional differences in YZ and XZ slices that make them unable to be used for 3D segmentation.
+In those instances, you may want to turn off 3D segmentation (do_3D=False) and run instead with stitch_threshold>0.
+Cellpose will create masks in 2D on each XY slice and then stitch them across slices if the IoU between the mask on the current slice and the next slice is greater than or equal to the stitch_threshold.
+""",
+ )
+
+ self.min_size = Integer(
+ text="Minimum size",
+ value=15,
+ minval=-1,
+ doc="""\
+Minimum number of pixels per mask; this can be turned off by setting the value to -1.
+""",
+ )
+
+ self.remove_edge_masks = Binary(
+ text="Remove objects that are touching the edge?",
+ value=True,
+ doc="""
+If you do not want to include any object masks that are not in full view in the image, you can have masks with pixels touching the edges removed.
+The default is set to "Yes".
+""",
+ )
+
+ def settings(self):
+ return [
+ self.x_name,
+ self.docker_or_python,
+ self.docker_image,
+ self.expected_diameter,
+ self.mode,
+ self.y_name,
+ self.use_gpu,
+ self.use_averaging,
+ self.supply_nuclei,
+ self.nuclei_image,
+ self.save_probabilities,
+ self.probabilities_name,
+ self.model_directory,
+ self.model_file_name,
+ self.flow_threshold,
+ self.cellprob_threshold,
+ self.manual_GPU_memory_share,
+ self.stitch_threshold,
+ self.do_3D,
+ self.min_size,
+ self.omni,
+ self.invert,
+ self.remove_edge_masks,
+ ]
+
+ def visible_settings(self):
+ vis_settings = [self.docker_or_python]
+
+ if self.docker_or_python.value == "Docker":
+ vis_settings += [self.docker_image]
+
+ vis_settings += [self.mode, self.x_name]
+
+ if self.docker_or_python.value == "Python":
+ vis_settings += [self.omni]
+
+ if self.mode.value != "nuclei":
+ vis_settings += [self.supply_nuclei]
+ if self.supply_nuclei.value:
+ vis_settings += [self.nuclei_image]
+ if self.mode.value == "custom":
+ vis_settings += [
+ self.model_directory,
+ self.model_file_name,
+ ]
+
+ vis_settings += [
+ self.expected_diameter,
+ self.cellprob_threshold,
+ self.min_size,
+ self.flow_threshold,
+ self.y_name,
+ self.invert,
+ self.save_probabilities,
+ ]
+
+ vis_settings += [self.do_3D, self.stitch_threshold, self.remove_edge_masks]
+
+ if self.do_3D.value:
+ vis_settings.remove(self.stitch_threshold)
+
+ if self.save_probabilities.value:
+ vis_settings += [self.probabilities_name]
+
+ vis_settings += [self.use_averaging, self.use_gpu]
+
+ if self.docker_or_python.value == 'Python':
+ if self.use_gpu.value:
+ vis_settings += [self.gpu_test, self.manual_GPU_memory_share]
+
+ return vis_settings
+
+ def validate_module(self, pipeline):
+ """If using custom model, validate the model file opens and works"""
+ if self.mode.value == "custom":
+ model_file = self.model_file_name.value
+ model_directory = self.model_directory.get_absolute_path()
+ model_path = os.path.join(model_directory, model_file)
+            try:
+                open(model_path).close()
+            except Exception:
+                raise ValidationError(
+                    "Failed to load custom file: %s " % model_path,
+                    self.model_file_name,
+                )
+            if self.docker_or_python.value == "Python":
+                try:
+                    from cellpose import models
+
+                    model = models.CellposeModel(
+                        pretrained_model=model_path, gpu=self.use_gpu.value
+                    )
+                except Exception:
+                    raise ValidationError(
+                        "Failed to load custom model: %s " % model_path,
+                        self.model_file_name,
+                    )
+
+ def run(self, workspace):
+ x_name = self.x_name.value
+ y_name = self.y_name.value
+ images = workspace.image_set
+ x = images.get_image(x_name)
+ dimensions = x.dimensions
+ x_data = x.pixel_data
+ anisotropy = 0.0
+ if self.do_3D.value:
+ anisotropy = x.spacing[0] / x.spacing[1]
+
+ diam = self.expected_diameter.value if self.expected_diameter.value > 0 else None
+
+ if x.multichannel:
+ raise ValueError(
+ "Color images are not currently supported. Please provide greyscale images."
+ )
+
+ if self.mode.value != "nuclei" and self.supply_nuclei.value:
+ nuc_image = images.get_image(self.nuclei_image.value)
+ # CellPose expects RGB, we'll have a blank red channel, cells in green and nuclei in blue.
+ if self.do_3D.value:
+ x_data = numpy.stack(
+ (numpy.zeros_like(x_data), x_data, nuc_image.pixel_data), axis=1
+ )
+
+ else:
+ x_data = numpy.stack(
+ (numpy.zeros_like(x_data), x_data, nuc_image.pixel_data), axis=-1
+ )
+
+ channels = [2, 3]
+ else:
+ channels = [0, 0]
+
+ if self.docker_or_python.value == "Python":
+ from cellpose import models, io, core, utils
+ self.cellpose_ver = importlib.metadata.version('cellpose')
+ if float(self.cellpose_ver[0:3]) >= 0.6 and int(self.cellpose_ver[0])<2:
+ if self.mode.value != 'custom':
+                    model = models.Cellpose(
+                        model_type=self.mode.value, gpu=self.use_gpu.value
+                    )
+ else:
+ model_file = self.model_file_name.value
+ model_directory = self.model_directory.get_absolute_path()
+ model_path = os.path.join(model_directory, model_file)
+ model = models.CellposeModel(pretrained_model=model_path, gpu=self.use_gpu.value)
+
+ else:
+ if self.mode.value != 'custom':
+                    model = models.CellposeModel(
+                        model_type=self.mode.value, gpu=self.use_gpu.value
+                    )
+ else:
+ model_file = self.model_file_name.value
+ model_directory = self.model_directory.get_absolute_path()
+ model_path = os.path.join(model_directory, model_file)
+ model = models.CellposeModel(pretrained_model=model_path, gpu=self.use_gpu.value)
+
+ if self.use_gpu.value and model.torch:
+ from torch import cuda
+ cuda.set_per_process_memory_fraction(self.manual_GPU_memory_share.value)
+
+ try:
+ if float(self.cellpose_ver[0:3]) >= 0.7 and int(self.cellpose_ver[0])<2:
+ y_data, flows, *_ = model.eval(
+ x_data,
+ channels=channels,
+ diameter=diam,
+ net_avg=self.use_averaging.value,
+ do_3D=self.do_3D.value,
+ anisotropy=anisotropy,
+ flow_threshold=self.flow_threshold.value,
+ cellprob_threshold=self.cellprob_threshold.value,
+ stitch_threshold=self.stitch_threshold.value,
+ min_size=self.min_size.value,
+ omni=self.omni.value,
+ invert=self.invert.value,
+ )
+ else:
+ y_data, flows, *_ = model.eval(
+ x_data,
+ channels=channels,
+ diameter=diam,
+ net_avg=self.use_averaging.value,
+ do_3D=self.do_3D.value,
+ anisotropy=anisotropy,
+ flow_threshold=self.flow_threshold.value,
+ cellprob_threshold=self.cellprob_threshold.value,
+ stitch_threshold=self.stitch_threshold.value,
+ min_size=self.min_size.value,
+ invert=self.invert.value,
+ )
+
+                if self.remove_edge_masks.value:
+ y_data = utils.remove_edge_masks(y_data)
+
+            except Exception as a:
+                LOGGER.error(f"Unable to create masks. Check your module settings. {a}")
+ finally:
+ if self.use_gpu.value and model.torch:
+ # Try to clear some GPU memory for other worker processes.
+ try:
+ cuda.empty_cache()
+ except Exception as e:
+ print(f"Unable to clear GPU memory. You may need to restart CellProfiler to change models. {e}")
+
+ elif self.docker_or_python.value == "Docker":
+ # Define how to call docker
+ docker_path = "docker" if sys.platform.lower().startswith("win") else "/usr/local/bin/docker"
+ # Create a UUID for this run
+ unique_name = str(uuid.uuid4())
+ # Directory that will be used to pass images to the docker container
+ temp_dir = os.path.join(get_default_output_directory(), ".cellprofiler_temp", unique_name)
+ temp_img_dir = os.path.join(temp_dir, "img")
+
+ os.makedirs(temp_dir, exist_ok=True)
+ os.makedirs(temp_img_dir, exist_ok=True)
+
+ temp_img_path = os.path.join(temp_img_dir, unique_name+".tiff")
+ if self.mode.value == "custom":
+ model_file = self.model_file_name.value
+ model_directory = self.model_directory.get_absolute_path()
+ model_path = os.path.join(model_directory, model_file)
+ temp_model_dir = os.path.join(temp_dir, "model")
+
+ os.makedirs(temp_model_dir, exist_ok=True)
+ # Copy the model
+ shutil.copy(model_path, os.path.join(temp_model_dir, model_file))
+
+ # Save the image to the Docker mounted directory
+ skimage.io.imsave(temp_img_path, x_data)
+
+ cmd = f"""
+ {docker_path} run --rm -v {temp_dir}:/data
+ {self.docker_image.value}
+ {'--gpus all' if self.use_gpu.value else ''}
+ cellpose
+ --dir /data/img
+ {'--pretrained_model ' + self.mode.value if self.mode.value != 'custom' else '--pretrained_model /data/model/' + model_file}
+ --chan {channels[0]}
+ --chan2 {channels[1]}
+            --diameter {diam if diam is not None else 0}
+ {'--net_avg' if self.use_averaging.value else ''}
+ {'--do_3D' if self.do_3D.value else ''}
+ --anisotropy {anisotropy}
+ --flow_threshold {self.flow_threshold.value}
+ --cellprob_threshold {self.cellprob_threshold.value}
+ --stitch_threshold {self.stitch_threshold.value}
+ --min_size {self.min_size.value}
+ {'--invert' if self.invert.value else ''}
+ {'--exclude_on_edges' if self.remove_edge_masks.value else ''}
+ --verbose
+ """
+
+ try:
+ subprocess.run(cmd.split(), text=True)
+ cellpose_output = numpy.load(os.path.join(temp_img_dir, unique_name + "_seg.npy"), allow_pickle=True).item()
+
+ y_data = cellpose_output["masks"]
+ flows = cellpose_output["flows"]
+ finally:
+ # Delete the temporary files
+ try:
+ shutil.rmtree(temp_dir)
+                except Exception:
+                    LOGGER.error("Unable to delete temporary directory, files may be in use by another program.")
+                    LOGGER.error(f"Temp folder is subfolder {temp_dir} in your Default Output Folder.\nYou may need to remove it manually.")
+
+
+ y = Objects()
+ y.segmented = y_data
+ y.parent_image = x.parent_image
+ objects = workspace.object_set
+ objects.add_objects(y, y_name)
+
+ if self.save_probabilities.value:
+ # Flows come out sized relative to CellPose's inbuilt model size.
+ # We need to slightly resize to match the original image.
+ size_corrected = skimage.transform.resize(flows[2], y_data.shape)
+ prob_image = Image(
+ size_corrected,
+ parent_image=x.parent_image,
+ convert=False,
+ dimensions=len(size_corrected.shape),
+ )
+
+ workspace.image_set.add(self.probabilities_name.value, prob_image)
+
+ if self.show_window:
+ workspace.display_data.probabilities = size_corrected
+
+ self.add_measurements(workspace)
+
+ if self.show_window:
+ if x.volumetric:
+ # Can't show CellPose-accepted colour images in 3D
+ workspace.display_data.x_data = x.pixel_data
+ else:
+ workspace.display_data.x_data = x_data
+ workspace.display_data.y_data = y_data
+ workspace.display_data.dimensions = dimensions
+
+ def display(self, workspace, figure):
+ if self.save_probabilities.value:
+ layout = (2, 2)
+ else:
+ layout = (2, 1)
+
+ figure.set_subplots(
+ dimensions=workspace.display_data.dimensions, subplots=layout
+ )
+
+ figure.subplot_imshow(
+ colormap="gray",
+ image=workspace.display_data.x_data,
+ title="Input Image",
+ x=0,
+ y=0,
+ )
+
+ figure.subplot_imshow_labels(
+ image=workspace.display_data.y_data,
+ sharexy=figure.subplot(0, 0),
+ title=self.y_name.value,
+ x=1,
+ y=0,
+ )
+ if self.save_probabilities.value:
+ figure.subplot_imshow(
+ colormap="gray",
+ image=workspace.display_data.probabilities,
+ sharexy=figure.subplot(0, 0),
+ title=self.probabilities_name.value,
+ x=0,
+ y=1,
+ )
+
+    def do_check_gpu(self):
+        import importlib.util
+
+        from cellpose import core
+
+        torch_installed = importlib.util.find_spec('torch') is not None
+        self.cellpose_ver = importlib.metadata.version('cellpose')
+        # if the old version of cellpose (<2.0), use the istorch kwarg
+        if float(self.cellpose_ver[0:3]) >= 0.7 and int(self.cellpose_ver[0]) < 2:
+            GPU_works = core.use_gpu(istorch=torch_installed)
+        else:  # if new version of cellpose, use the use_torch kwarg
+            GPU_works = core.use_gpu(use_torch=torch_installed)
+ if GPU_works:
+ message = "GPU appears to be working correctly!"
+ else:
+ message = (
+ "GPU test failed. There may be something wrong with your configuration."
+ )
+ import wx
+
+ wx.MessageBox(message, caption="GPU Test")
+
+ def upgrade_settings(self, setting_values, variable_revision_number, module_name):
+ if variable_revision_number == 1:
+ setting_values = setting_values + ["0.4", "0.0"]
+ variable_revision_number = 2
+ if variable_revision_number == 2:
+ setting_values = setting_values + ["0.0", False, "15", "1.0", False, False]
+ variable_revision_number = 3
+ if variable_revision_number == 3:
+            setting_values = [setting_values[0]] + ["Python", CELLPOSE_DOCKER_IMAGE_WITH_PRETRAINED] + setting_values[1:]
+ variable_revision_number = 4
+ return setting_values, variable_revision_number
diff --git a/CP5/active_plugins/runimagejscript.py b/CP5/active_plugins/runimagejscript.py
new file mode 100644
index 00000000..2d34545c
--- /dev/null
+++ b/CP5/active_plugins/runimagejscript.py
@@ -0,0 +1,722 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+from os import path
+from wx import Gauge
+from wx import Window
+from collections.abc import Iterable
+from threading import Thread
+from sys import platform
+import time
+import skimage.io
+import cpij.bridge as ijbridge, cpij.server as ijserver
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+from cellprofiler_core.image import Image
+from cellprofiler_core.module import Module
+from cellprofiler_core.preferences import ABSOLUTE_FOLDER_NAME
+from cellprofiler_core.setting.choice import Choice
+from cellprofiler_core.setting.text import Filename, Text, Directory
+from cellprofiler_core.constants.module import (
+ IO_FOLDER_CHOICE_HELP_TEXT,
+)
+from cellprofiler_core.setting import ValidationError
+from cellprofiler_core.setting import Binary
+from cellprofiler_core.setting.do_something import DoSomething, RemoveSettingButton
+from cellprofiler_core.setting._settings_group import SettingsGroup
+from cellprofiler_core.setting import Divider, HiddenCount
+from cellprofiler_core.setting.subscriber import ImageSubscriber
+
+imagej_link = "https://doi.org/10.1038/nmeth.2089"
+imagej2_link = "https://doi.org/10.1186/s12859-017-1934-z"
+pyimagej_link = "https://doi.org/10.1038/s41592-022-01655-4"
+
+__doc__ = """\
+RunImageJScript
+=================
+
+The **RunImageJScript** module allows you to run any supported ImageJ script as part
+of your workflow.
+
+First, select a script file. Then click the \"Get parameters from script\" button to detect required inputs for your
+script. Each input will have its own setting created, allowing you to pass data from CellProfiler to ImageJ.
+
+After filling in any required inputs you can run the script normally.
+
+Optionally, you can select a local existing ImageJ installation to be used to run your script, or specify an initialization
+string (per https://github.com/imagej/pyimagej/blob/master/doc/Initialization.md). If no input is provided, or the
+input is invalid, the latest version will be downloaded if necessary and used.
+
+Notes
+^^^^^^^
+
+1. Only numeric, text and image input types are currently supported.
+2. Outputs must be explicitly declared in the script via @OUTPUT
+3. Only outputs of type net.imagej.Dataset or net.imagej.ImgPlus are currently supported
+
+See also
+^^^^^^^^
+
+ImageJ Scripting: https://imagej.net/Scripting
+Schneider, C. A., Rasband, W. S., & Eliceiri, K. W. (2012). NIH Image to ImageJ: 25 years of image analysis. Nature Methods, 9(7), 671–675. {imagej_link}
+Rueden, C. T., Schindelin, J., Hiner, M. C., DeZonia, B. E., Walter, A. E., Arena, E. T., & Eliceiri, K. W. (2017). ImageJ2: ImageJ for the next generation of scientific image data. BMC Bioinformatics, 18(1). {imagej2_link}
+Rueden, C.T., Hiner, M.C., Evans, E.L. Pinkart, M.A., Lucas, A.M., Carpenter, A.E., Cimini, B.A., & Eliceiri, K. W. (2022). PyImageJ: A library for integrating ImageJ and Python. Nat Methods 19, 1326–1327 . {pyimagej_link}
+
+
+"""
+
+global stop_progress_thread
+stop_progress_thread = False # Used to control the display of progress graphics
+
+
+class PyimagejError(EnvironmentError):
+ """
+ An exception indicating that something went wrong in PyimageJ
+ """
+
+ def __init__(self, message):
+        super(PyimagejError, self).__init__(message)
+ self.message = message
+
+
+def add_param_info_settings(group, param_name, param_type, param_class):
+ """
+ Each extracted name, type and input/output class is saved into a (hidden) setting. This is useful information to
+ have when saving and loading pipelines back into CellProfiler.
+
+ Parameters
+ ----------
+ group : SettingsGroup, required
+ The SettingsGroup for this parameter, to hold the hidden info settings
+ param_name : str, required
+ The name of the parameter
+ param_type : str, required
+ The Java class name describing the parameter type
+ param_class: str, required
+ One of {input_class} or {output_class}, based on the parameter use
+ """
+ group.append("name", Text("Parameter name", param_name))
+ group.append(
+ "type",
+ Text("Parameter type", param_type),
+ )
+ group.append(
+ "io_class",
+ Text("Parameter classification", param_class),
+ )
+
+
+class RunImageJScript(Module):
+ """
+ Module to run ImageJ scripts via pyimagej
+ """
+
+ module_name = "RunImageJScript"
+ variable_revision_number = 3
+ category = "Advanced"
+
+ doi = {"If you are using RunImageJScript please cite the following": pyimagej_link }
+
+ def __init__(self):
+ super().__init__()
+ self.parsed_params = False # Used for validation
+ self.initialization_failed = False # Used for validation
+
+ def create_settings(self):
+ module_explanation = [
+ "The"
+ + self.module_name
+ + "module allows you to run any supported ImageJ script as part of your workflow.",
+ "First, select your desired initialization method and specify the app directory or endpoint(s) if needed.",
+ "Then select a script file to be executed by this module.",
+ 'Click the "Get parameters from script" button to detect required inputs for your script:',
+ "each input will have its own setting created, allowing you to pass data from CellProfiler to ImageJ.",
+ "After filling in any required inputs you can run the module normally.",
+ "Note: ImageJ will only be initialized once per CellProfiler session.",
+ "Note: only numeric, text and image parameters are currently supported.",
+ "See also ImageJ Scripting: https://imagej.net/Scripting.",
+ ]
+ self.set_notes([" ".join(module_explanation)])
+
+ self.init_choice = Choice(
+ "Initialization type",
+ [ijserver.INIT_LOCAL, ijserver.INIT_ENDPOINT, ijserver.INIT_LATEST],
+ tooltips={
+ ijserver.INIT_LOCAL: "Use a local ImageJ/Fiji installation",
+ ijserver.INIT_ENDPOINT: "Specify a particular endpoint",
+ ijserver.INIT_LATEST: "Use the latest Fiji, downloading if needed.",
+ },
+ doc="""\
+Note that initialization will only occur once per CellProfiler session! After initialization, these options will be
+locked for the remainder of the session.
+
+Select the mechanism for initializing ImageJ:
+ * {init_local}: Use a local Fiji or ImageJ installation
+ * {init_endpoint}: Precisely specify the version of one or more components
+ * {init_latest}: Use the latest Fiji version
+
+Note that any option besides {init_local} may result in a download of the requested components.
+ """.format(
+ init_local=ijserver.INIT_LOCAL,
+ init_endpoint=ijserver.INIT_ENDPOINT,
+ init_latest=ijserver.INIT_LATEST,
+ ),
+ )
+
+ self.endpoint_string = Text(
+ "Initialization endpoint",
+ "sc.fiji:fiji:2.1.0",
+ doc="""\
+Specify an initialization string as described in https://github.com/imagej/pyimagej/blob/master/doc/Initialization.md
+ """,
+ )
+
+ self.initialized_method = Text(
+ "Initialization type",
+ value="Do not use",
+ doc="""\
+Indicates the method that was used to initialize ImageJ in this CellProfiler session.
+ """,
+ )
+
+ self.convert_types = Binary(
+ "Adjust image type?",
+ True,
+ doc="""\
+If enabled, ensures images are always converted to unsigned integer types when sent to ImageJ, and back to signed float types when returned to CellProfiler.
+This can help common display issues by providing each application a best guess at its "expected" data type.
+If you choose to disable this function, your ImageJ script will need to account for images coming in as signed float types.
+ """,
+ )
+
+ init_display_string = ijbridge.init_method()
+ if init_display_string:
+ # ImageJ thread is already running
+ self.initialized_method.set_value(init_display_string)
+
+ self.app_directory = Directory(
+ "ImageJ directory",
+ allow_metadata=False,
+ doc="""\
+Select the folder containing the desired ImageJ/Fiji application.
+
+{fcht}
+""".format(
+ fcht=IO_FOLDER_CHOICE_HELP_TEXT
+ ),
+ )
+ if platform != "darwin":
+ self.app_directory.join_parts(ABSOLUTE_FOLDER_NAME, "Fiji.app")
+
+ def set_directory_fn_app(path):
+ dir_choice, custom_path = self.app_directory.get_parts_from_path(path)
+ self.app_directory.join_parts(dir_choice, custom_path)
+
+ self.app_file = Filename(
+ "Local App",
+ "Fiji.app",
+ doc="Select the desired app, such as Fiji.app",
+ get_directory_fn=self.app_directory.get_absolute_path,
+ set_directory_fn=set_directory_fn_app,
+ browse_msg="Choose local application",
+ )
+
+ self.script_directory = Directory(
+ "Script directory",
+ allow_metadata=False,
+ doc="""\
+Select the folder containing the script.
+
+{fcht}
+""".format(
+ fcht=IO_FOLDER_CHOICE_HELP_TEXT
+ ),
+ )
+
+ def set_directory_fn_script(script_path):
+ dir_choice, custom_path = self.script_directory.get_parts_from_path(
+ script_path
+ )
+ self.script_directory.join_parts(dir_choice, custom_path)
+ self.clear_script_parameters()
+
+ self.script_file = Filename(
+ "ImageJ Script",
+ "script.py",
+ doc="Select a script file written in any ImageJ-supported scripting language.",
+ get_directory_fn=self.script_directory.get_absolute_path,
+ set_directory_fn=set_directory_fn_script,
+ browse_msg="Choose ImageJ script file",
+ )
+ self.get_parameters_button = DoSomething(
+ "",
+ "Get parameters from script",
+ self.get_parameters_helper,
+ doc="""\
+Parse parameters from the currently selected script and add the appropriate settings to this CellProfiler module.
+
+Note: this must be done each time you change the script, before running the CellProfiler pipeline!
+""",
+ )
+ self.script_parameter_list = []
+ self.script_input_settings = (
+ {}
+ ) # Map of input parameter names to CellProfiler settings objects
+ self.script_output_settings = (
+ {}
+ ) # Map of output parameter names to CellProfiler settings objects
+ self.script_parameter_count = HiddenCount(self.script_parameter_list)
+
+ def get_init_string(self):
+ """
+ Determine if a particular initialization method has been specified. This could be a path to a local installation
+ or a version string.
+ """
+ choice = self.init_choice.get_value()
+ if choice == ijserver.INIT_LATEST:
+ return None
+
+ if choice == ijserver.INIT_LOCAL:
+ init_string = self.app_directory.get_absolute_path()
+ if platform == "darwin":
+ init_string = path.join(init_string, self.app_file.value)
+ elif choice == ijserver.INIT_ENDPOINT:
+ init_string = self.endpoint_string.get_value()
+
+ return init_string
+
+ def clear_script_parameters(self):
+ """
+ Remove any existing settings added by scripts
+ """
+ self.script_parameter_list.clear()
+ self.script_input_settings.clear()
+ self.script_output_settings.clear()
+ self.parsed_params = False
+ self.initialization_failed = False
+
+ def get_parameters_helper(self):
+ """
+ Helper method to launch get_parameters_from_script on a thread so that it isn't run on the GUI thread, since
+ it may be slow (when initializing pyimagej).
+ """
+ # Reset previously parsed parameters
+ self.clear_script_parameters()
+
+ global stop_progress_thread
+ stop_progress_thread = False
+
+ progress_gauge = Gauge(Window.FindFocus(), -1, size=(100, -1))
+ progress_gauge.Show(True)
+
+ parse_param_thread = Thread(
+ target=self.get_parameters_from_script,
+ name="Parse Parameters Thread",
+ daemon=True,
+ )
+ parse_param_thread.start()
+
+ while True:
+ # Wait for get_parameters_from_script to finish
+ progress_gauge.Pulse()
+ time.sleep(0.025)
+ if stop_progress_thread:
+ progress_gauge.Show(False)
+ break
+
+ if not self.initialization_failed:
+ self.parsed_params = True
+
+ def init_pyimagej(self):
+ self.initialization_failed = False
+ init_string = self.get_init_string()
+ if ijbridge.init_pyimagej(init_string):
+ init_display_string = self.init_choice.get_value()
+ if init_display_string != ijserver.INIT_LATEST:
+ init_display_string += ": " + init_string
+ self.initialized_method.set_value(init_display_string)
+ else:
+ self.initialization_failed = True
+
+ def get_parameters_from_script(self):
+ """
+ Use PyImageJ to read header text from an ImageJ script and extract inputs/outputs, which are then converted to
+ CellProfiler settings for this module
+ """
+ global stop_progress_thread
+ script_filepath = path.join(
+ self.script_directory.get_absolute_path(), self.script_file.value
+ )
+
+ if not self.script_file.value or not path.exists(script_filepath):
+ # nothing to do
+ stop_progress_thread = True
+ return
+
+ # start the imagej server if needed
+ ijbridge.start_imagej_server()
+
+ # Start pyimagej if needed
+ self.init_pyimagej()
+ if self.initialization_failed:
+ stop_progress_thread = True
+ return
+
+ # Tell pyimagej to parse the script parameters
+ lock = ijbridge.lock()
+ lock.acquire()
+ ijbridge.to_imagej().put(
+ {
+ ijserver.PYIMAGEJ_KEY_COMMAND: ijserver.PYIMAGEJ_CMD_SCRIPT_PARSE,
+ ijserver.PYIMAGEJ_KEY_INPUT: script_filepath,
+ }
+ )
+
+ ij_return = ijbridge.from_imagej().get()
+ lock.release()
+
+ # Process pyimagej's output, converting script parameters to settings
+ if ij_return != ijserver.PYIMAGEJ_STATUS_CMD_UNKNOWN:
+ input_params = ij_return[ijserver.PYIMAGEJ_SCRIPT_PARSE_INPUTS]
+ output_params = ij_return[ijserver.PYIMAGEJ_SCRIPT_PARSE_OUTPUTS]
+
+ for param_dict, settings_dict, io_class in (
+ (input_params, self.script_input_settings, ijserver.INPUT_CLASS),
+ (output_params, self.script_output_settings, ijserver.OUTPUT_CLASS),
+ ):
+ for param_name in param_dict:
+ param_type = param_dict[param_name]
+ next_setting = ijserver.convert_java_type_to_setting(
+ param_name, param_type, io_class
+ )
+ if next_setting is not None:
+ settings_dict[param_name] = next_setting
+ group = SettingsGroup()
+ group.append("setting", next_setting)
+ group.append(
+ "remover",
+ RemoveSettingButton(
+ "",
+ "Remove this variable",
+ self.script_parameter_list,
+ group,
+ ),
+ )
+ add_param_info_settings(group, param_name, param_type, io_class)
+ # Each setting gets a group containing:
+ # 0 - the setting
+ # 1 - its remover
+ # 2 - (hidden) parameter name
+ # 3 - (hidden) parameter type
+ # 4 - (hidden) parameter i/o class
+ self.script_parameter_list.append(group)
+
+ stop_progress_thread = True
+
+ def settings(self):
+ result = [
+ self.script_parameter_count,
+ self.init_choice,
+ self.app_directory,
+ self.app_file,
+ self.endpoint_string,
+ self.script_directory,
+ self.script_file,
+ self.get_parameters_button,
+ self.convert_types,
+ ]
+ if len(self.script_parameter_list) > 0:
+ result += [Divider(line=True)]
+ for script_parameter_group in self.script_parameter_list:
+ if isinstance(script_parameter_group.setting, Iterable):
+ for s in script_parameter_group.setting:
+ result += [s]
+ else:
+ result += [script_parameter_group.setting]
+ result += [script_parameter_group.remover]
+ result += [script_parameter_group.name]
+ result += [script_parameter_group.type]
+ result += [script_parameter_group.io_class]
+
+ return result
+
+ def visible_settings(self):
+ visible_settings = []
+
+ # Update the visible settings based on the selected initialization method
+ # If ImageJ is already initialized we just want to report how it was initialized
+ # Otherwise we show: a string entry for "endpoint", a directory chooser for "local" (and file chooser if on mac),
+ # and nothing if "latest"
+ init_method = ijbridge.init_method()
+ if not init_method:
+ # ImageJ is not initialized yet
+ visible_settings += [self.init_choice]
+ input_type = self.init_choice.get_value()
+ if input_type == ijserver.INIT_ENDPOINT:
+ visible_settings += [self.endpoint_string]
+ elif input_type == ijserver.INIT_LOCAL:
+ visible_settings += [self.app_directory]
+ if platform == "darwin":
+ visible_settings += [self.app_file]
+ else:
+ # ImageJ is initialized
+ self.initialized_method.set_value(init_method)
+ visible_settings += [self.initialized_method]
+ visible_settings += [Divider(line=True)]
+ visible_settings += [
+ self.script_directory,
+ self.script_file,
+ self.get_parameters_button,
+ self.convert_types,
+ ]
+ if len(self.script_parameter_list) > 0:
+ visible_settings += [Divider(line=True)]
+ for script_parameter in self.script_parameter_list:
+ if isinstance(script_parameter.setting, Iterable):
+ for s in script_parameter.setting:
+ visible_settings += [s]
+ else:
+ visible_settings += [script_parameter.setting]
+ visible_settings += [script_parameter.remover]
+
+ return visible_settings
+
+ def prepare_settings(self, setting_values):
+ # Start the ImageJ server here if it's not already running
+ # This ensures the server is started from the main process after the
+ # GUI has spun up
+ ijbridge.start_imagej_server()
+
+ settings_count = int(setting_values[0])
+
+ if settings_count == 0:
+ # No params were saved
+ return
+
+ # Params were parsed previously and saved
+ self.parsed_params = True
+
+ # Settings are stored sequentially as (value(s), remover, name, type, io_class)
+ # Since a single parameter's setting can hold multiple values, we have to work backwards
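+ # Hypothetical illustration (names are made up, not from a real pipeline):
+ # a single Integer input named "radius" would occupy the tail of
+ # setting_values as [..., "5", remover, "radius", "java.lang.Integer", io_class]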
+ i = len(setting_values) - 1
+ loaded_settings = []
+ while settings_count > 0:
+ group = SettingsGroup()
+ # get the name, type and class
+ param_name = setting_values[i - 2]
+ param_type = setting_values[i - 1]
+ io_class = setting_values[i]
+ setting = ijserver.convert_java_type_to_setting(
+ param_name, param_type, io_class
+ )
+ # account for remover, name, type and io_class
+ i -= 4
+ # account for the number of values in this setting
+ if isinstance(setting, Iterable):
+ i -= len(setting)
+ else:
+ i -= 1
+ group.append("setting", setting)
+ group.append(
+ "remover",
+ RemoveSettingButton(
+ "", "Remove this variable", self.script_parameter_list, group
+ ),
+ )
+ add_param_info_settings(group, param_name, param_type, io_class)
+ loaded_settings.append(group)
+ if ijserver.INPUT_CLASS == io_class:
+ self.script_input_settings[param_name] = setting
+ elif ijserver.OUTPUT_CLASS == io_class:
+ self.script_output_settings[param_name] = setting
+ settings_count -= 1
+
+ # add the loaded settings to our overall list, in proper order
+ loaded_settings.reverse()
+ for s in loaded_settings:
+ self.script_parameter_list.append(s)
+
+ def validate_module(self, pipeline):
+ if self.initialization_failed:
+ raise ValidationError(
+ "Error starting ImageJ. Please check your initialization settings and try again.",
+ self.init_choice,
+ )
+
+ no_script_msg = 'Please select a valid ImageJ script and use the "Get parameters from script" button.'
+
+ if (
+ not self.parsed_params
+ or not self.script_directory
+ or not self.script_file.value
+ ):
+ raise ValidationError(no_script_msg, self.script_file)
+
+ script_filepath = path.join(
+ self.script_directory.get_absolute_path(), self.script_file.value
+ )
+ if not path.exists(script_filepath):
+ raise ValidationError(
+ "The script you have selected is not a valid path. " + no_script_msg,
+ self.script_file,
+ )
+
+ if self.init_choice.get_value() == ijserver.INIT_LOCAL:
+ app_path = self.get_init_string()
+ if not path.exists(app_path):
+ raise ValidationError(
+ "The local application you have selected is not a valid path.",
+ self.app_directory,
+ )
+
+ def validate_module_warnings(self, pipeline):
+ """Warn user if the specified FIJI executable directory is not found, and warn that a copy of FIJI will be downloaded"""
+ warn_msg = (
+ 'Please note: for any initialization method except "Local", a new Fiji may be downloaded'
+ " to your machine if cached dependencies are not found."
+ )
+ init_type = self.init_choice.get_value()
+ if init_type != ijserver.INIT_LOCAL:
+ # The component we attach the error to depends on if initialization has happened or not
+ if not ijbridge.init_method():
+ raise ValidationError(warn_msg, self.init_choice)
+
+ def run(self, workspace):
+ self.init_pyimagej()
+
+ # Unwrap the current settings from their SettingsGroups
+ all_settings = list(map(lambda x: x.settings[0], self.script_parameter_list))
+ # Update the script input/output settings in case any were removed from the GUI
+ self.script_input_settings = {k: v for (k,v) in self.script_input_settings.items() if v in all_settings}
+ self.script_output_settings = {k: v for (k,v) in self.script_output_settings.items() if v in all_settings}
+
+ if self.show_window:
+ workspace.display_data.script_input_pixels = {}
+ workspace.display_data.script_input_dimensions = {}
+ workspace.display_data.script_output_pixels = {}
+ workspace.display_data.script_output_dimensions = {}
+
+ script_filepath = path.join(
+ self.script_directory.get_absolute_path(), self.script_file.value
+ )
+ # convert the CP settings to script parameters for pyimagej
+ script_inputs = {}
+ for name in self.script_input_settings:
+ setting = self.script_input_settings[name]
+ if isinstance(setting, ImageSubscriber):
+ # Images need to be pulled from the workspace
+ script_inputs[name] = workspace.image_set.get_image(setting.get_value())
+ if self.show_window:
+ workspace.display_data.script_input_pixels[name] = script_inputs[
+ name
+ ].pixel_data
+ workspace.display_data.script_input_dimensions[
+ name
+ ] = script_inputs[name].dimensions
+ elif isinstance(setting, Iterable):
+ # Currently the only supported multi-part setting is a Filename + Directory
+ setting_dir = setting[0]
+ setting_file = setting[1]
+ script_inputs[name] = path.join(
+ setting_dir.get_absolute_path(), setting_file.value
+ )
+ else:
+ # Other settings can be read directly
+ script_inputs[name] = setting.get_value()
+
+ # Start the script
+ lock = ijbridge.lock()
+ lock.acquire()
+ ijbridge.to_imagej().put(
+ {
+ ijserver.PYIMAGEJ_KEY_COMMAND: ijserver.PYIMAGEJ_CMD_SCRIPT_RUN,
+ ijserver.PYIMAGEJ_KEY_INPUT: {
+ ijserver.PYIMAGEJ_SCRIPT_RUN_FILE_KEY: script_filepath,
+ ijserver.PYIMAGEJ_SCRIPT_RUN_INPUT_KEY: script_inputs,
+ ijserver.PYIMAGEJ_SCRIPT_RUN_CONVERT_IMAGES: self.convert_types.value,
+ },
+ }
+ )
+
+ # Retrieve script output
+ ij_return = ijbridge.from_imagej().get()
+ lock.release()
+
+ if ij_return != ijserver.PYIMAGEJ_STATUS_CMD_UNKNOWN:
+ script_outputs = ij_return[ijserver.PYIMAGEJ_KEY_OUTPUT]
+ for name in self.script_output_settings:
+ output_key = self.script_output_settings[name].get_value()
+ output_value = script_outputs[name]
+ # FIXME should only do this for image outputs
+ # convert back to floats for CellProfiler
+ if self.convert_types.value:
+ output_value = skimage.img_as_float(output_value)
+ output_image = Image(image=output_value, convert=False)
+ workspace.image_set.add(output_key, output_image)
+ if self.show_window:
+ workspace.display_data.script_output_pixels[
+ name
+ ] = output_image.pixel_data
+ workspace.display_data.dimensions = output_image.dimensions
+
+ def display(self, workspace, figure):
+ # TODO how do we handle differences in dimensionality between input/output images?
+ figure.set_subplots(
+ (
+ 2,
+ max(
+ len(workspace.display_data.script_input_pixels),
+ len(workspace.display_data.script_output_pixels),
+ ),
+ ),
+ dimensions=2,
+ )
+
+ i = 0
+ for name in workspace.display_data.script_input_pixels:
+ figure.subplot_imshow_grayscale(
+ 0,
+ i,
+ workspace.display_data.script_input_pixels[name],
+ title="Input image: {}".format(name),
+ )
+ i += 1
+
+ i = 0
+ for name in workspace.display_data.script_output_pixels:
+ figure.subplot_imshow_grayscale(
+ 1,
+ i,
+ workspace.display_data.script_output_pixels[name],
+ title="Output image: {}".format(name),
+ sharexy=figure.subplot(0, i),
+ )
+ i += 1
+
+ def upgrade_settings(self, setting_values, variable_revision_number, module_name):
+ if variable_revision_number == 1:
+ # Added convert_types Binary setting
+ setting_values = setting_values[:8] + [True] + setting_values[8:]
+ variable_revision_number = 2
+ if variable_revision_number == 2:
+ # Allowed multiple settings per parameter
+ # Force re-parsing of parameters
+ setting_values[0] = "0"
+ variable_revision_number = 3
+
+ return setting_values, variable_revision_number
diff --git a/CP5/active_plugins/runomnipose.py b/CP5/active_plugins/runomnipose.py
new file mode 100644
index 00000000..ae8c2b9b
--- /dev/null
+++ b/CP5/active_plugins/runomnipose.py
@@ -0,0 +1,655 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import numpy
+import os
+from cellpose_omni import models, io, core, plot
+import ncolor
+import omnipose
+from skimage.transform import resize
+import importlib.metadata
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+from cellprofiler_core.image import Image
+from cellprofiler_core.module.image_segmentation import ImageSegmentation
+from cellprofiler_core.object import Objects
+from cellprofiler_core.setting import Binary
+from cellprofiler_core.setting.choice import Choice
+from cellprofiler_core.setting.do_something import DoSomething
+from cellprofiler_core.setting.subscriber import ImageSubscriber
+from cellprofiler_core.setting.text import (
+ Integer,
+ ImageName,
+ Directory,
+ Filename,
+ Float,
+)
+
+CUDA_LINK = "https://pytorch.org/get-started/locally/"
+Cellpose_link = " https://doi.org/10.1038/s41592-020-01018-x"
+Omnipose_link = "https://doi.org/10.1101/2021.11.03.467199"
+cellpose_ver = importlib.metadata.version("cellpose")
+
+__doc__ = f"""\
+RunOmnipose
+===========
+
+**RunOmnipose** uses a pre-trained machine learning model (Omnipose) to detect cells or nuclei in an image.
+
+This module is useful for automating simple segmentation tasks in CellProfiler.
+The module accepts greyscale input images and produces an object set. Probabilities can also be captured as an image.
+
+Loading in a model will take slightly longer the first time you run it each session. When evaluating
+performance you may want to consider the time taken to predict subsequent images.
+
+This module now also supports Omnipose. Omnipose builds on Cellpose; for the purposes of **RunOmnipose** it adds two
+features: additional models (bact, plus bact-omni and cyto2-omni, which were trained using the Omnipose architecture),
+and the Omnipose mask reconstruction algorithm, which was created to solve over-segmentation of large cells. This is
+useful for bacterial cells, but can be used for other arbitrary and anisotropic shapes. You can mix and match Omnipose
+models with Cellpose-style masking or vice versa.
+
+The module has been updated to be compatible with the latest release of Cellpose. The 'cells' model from the old version of this module corresponds to the 'cyto2' model.
+
+Installation:
+
+You must have Cellpose version >= 1.0.2 installed.
+
+You'll want to run `pip install cellpose` in your CellProfiler Python environment to set up Cellpose. If you have an older version of Cellpose,
+run `python -m pip install cellpose --upgrade`.
+
+To use the Omnipose models and mask reconstruction method, you'll want to install Omnipose (`pip install omnipose`) and Cellpose version 1.0.2 (`pip install cellpose==1.0.2`).
+
+The first time it is loaded into CellProfiler, Cellpose will need to download some model files from the internet. This
+may take some time. If you want to use a GPU to run the model, you'll need a compatible version of PyTorch and a
+supported GPU. Instructions are available at this link: {CUDA_LINK}
+
+Stringer, C., Wang, T., Michaelos, M. et al. Cellpose: a generalist algorithm for cellular segmentation. Nat Methods 18, 100–106 (2021). {Cellpose_link}
+Kevin J. Cutler, Carsen Stringer, Paul A. Wiggins, Joseph D. Mougous. Omnipose: a high-precision morphology-independent solution for bacterial cell segmentation. bioRxiv 2021.11.03.467199. {Omnipose_link}
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES YES NO
+============ ============ ===============
+
+"""
+
+model_dic = models.MODEL_NAMES
+model_dic.append("custom")
+
+
+class RunOmnipose(ImageSegmentation):
+ category = "Object Processing"
+
+ module_name = "RunOmnipose"
+
+ variable_revision_number = 3
+
+ doi = {
+ "Please cite the following when using RunOmnipose:": "https://doi.org/10.1038/s41592-020-01018-x",
+ "If you are using Omnipose also cite the following:": "https://doi.org/10.1101/2021.11.03.467199",
+ }
+
+ def create_settings(self):
+ super(RunOmnipose, self).create_settings()
+
+ self.expected_diameter = Integer(
+ text="Expected object diameter",
+ value=0,
+ minval=0,
+ doc="""\
+ The average diameter of the objects to be detected. Setting this to 0 will attempt to automatically detect object size.
+ Note that automatic diameter mode does not work when running on 3D images.
+
+ Cellpose models come with a pre-defined object diameter. Your image will be resized during detection to attempt to
+ match the diameter expected by the model. The default models have an expected diameter of ~16 pixels; if trying to
+ detect much smaller objects it may be more efficient to resize the image first using the Resize module.
+ """,
+ )
+
+ self.mode = Choice(
+ text="Model",
+ choices=model_dic,
+ value="cyto2",
+ doc="""\
+ CellPose comes with models for detecting nuclei or cells. Alternatively, you can supply a custom-trained model
+ generated using the command line or Cellpose GUI. Custom models can be useful if working with unusual cell types.
+ """,
+ )
+
+ self.omni = Binary(
+ text="Use Omnipose for mask reconstruction",
+ value=True,
+ doc="""If enabled, Omnipose mask recontruction features will be used. """,
+ )
+
+ self.cluster = Binary(
+ text="DBSCAN clustering",
+ value=False,
+ doc="""Use DBSCAN clustering to solve over-segmentation of thin regions. """,
+ )
+
+ self.verbose = Binary(
+ text="verbose", value=True, doc="""Turn on verbose output."""
+ )
+
+ self.tile = Binary(
+ text="tile",
+ value=False,
+ doc="""Tile image for running model and stitch outputs.""",
+ )
+
+ self.ncolor = Binary(
+ text="ncolor", value=True, doc="""Display masks in ncolor mode."""
+ )
+
+ self.do_3D = Binary(
+ text="Use 3D",
+ value=False,
+ doc="""If enabled, 3D specific settings will be available.""",
+ )
+
+ self.use_gpu = Binary(
+ text="Use GPU",
+ value=False,
+ doc="""\
+ If enabled, Cellpose will attempt to run detection on your system's graphics card (GPU).
+ Note that you will need a CUDA-compatible GPU and correctly configured PyTorch version, see this link for details:
+ {CUDA_LINK}
+
+ If disabled or incorrectly configured, Cellpose will run on your CPU instead. This is much slower but more compatible
+ with different hardware setups.
+
+ Note that, particularly when in 3D mode, lack of GPU memory can become a limitation. If a model crashes you may need to
+ re-start CellProfiler to release GPU memory. Resizing large images prior to running them through the model can free up
+ GPU memory.
+ """,
+ )
+
+ self.use_averaging = Binary(
+ text="Use averaging",
+ value=False,
+ doc="""\
+ If enabled, CellPose will run its four inbuilt models and take a consensus to determine the results. If disabled, only a
+ single model will be called to produce results. Disabling averaging is faster to run but less accurate.""",
+ )
+
+ self.invert = Binary(
+ text="Invert images",
+ value=False,
+ doc="""\
+ If enabled, the image will be inverted and also normalized.
+ This is for use with fluorescence images and the bact model (the bact model was trained on phase images).""",
+ )
+
+ self.supply_nuclei = Binary(
+ text="Supply nuclei image as well?",
+ value=False,
+ doc="""
+ When detecting whole cells, you can provide a second image featuring a nuclear stain to assist
+ the model with segmentation. This can help to split touching cells.""",
+ )
+
+ self.nuclei_image = ImageSubscriber(
+ "Select the nuclei image",
+ doc="Select the image you want to use as the nuclear stain.",
+ )
+
+ self.save_probabilities = Binary(
+ text="Save probability image?",
+ value=False,
+ doc="""
+ If enabled, the probability scores from the model will be recorded as a new image.
+ Probability >0 is considered as being part of a cell.
+ You may want to use a higher threshold to manually generate objects.""",
+ )
+
+ self.probabilities_name = ImageName(
+ "Name the probability image",
+ "Probabilities",
+ doc="Enter the name you want to call the probability image produced by this module.",
+ )
+
+ self.model_directory = Directory(
+ "Location of the pre-trained model file",
+ doc=f"""\
+ *(Used only when using a custom pre-trained model)*
+ Select the location of the pre-trained CellPose model file that will be used for detection.""",
+ )
+
+ def get_directory_fn():
+ """Get the directory for the rules file name"""
+ return self.model_directory.get_absolute_path()
+
+ def set_directory_fn(path):
+ dir_choice, custom_path = self.model_directory.get_parts_from_path(path)
+
+ self.model_directory.join_parts(dir_choice, custom_path)
+
+ self.model_file_name = Filename(
+ "Pre-trained model file name",
+ "cyto_0",
+ get_directory_fn=get_directory_fn,
+ set_directory_fn=set_directory_fn,
+ doc=f"""\
+*(Used only when using a custom pre-trained model)*
+This file can be generated by training a custom model within the CellPose GUI or command line applications.""",
+ )
+
+ self.gpu_test = DoSomething(
+ "",
+ "Test GPU",
+ self.do_check_gpu,
+ doc=f"""\
+ Press this button to check whether a GPU is correctly configured.
+
+ If you have a dedicated GPU, a failed test usually means that either your GPU does not support deep learning or the
+ required dependencies are not installed.
+ If you have multiple GPUs on your system, this button will only test the first one.
+ """,
+ )
+
+ self.flow_threshold = Float(
+ text="Flow threshold",
+ value=0.0,
+ minval=0,
+ doc="""\
+ The flow_threshold parameter is the maximum allowed error of the flows for each mask. The standard Cellpose default is flow_threshold=0.4.
+ Increase this threshold if cellpose is not returning as many masks as you’d expect.
+ Similarly, decrease this threshold if cellpose is returning too many ill-shaped masks
+ """,
+ )
+
+ self.mask_threshold = Float(
+ text="Distance field threshold",
+ value=0.0,
+ minval=-6.0,
+ maxval=6.0,
+ doc=f"""\
+Cell probability threshold (all pixels with probability above threshold kept for masks). Recommended default is 0.0.
+Values range from -6 to 6.
+""",
+ )
+
+ self.manual_GPU_memory_share = Float(
+ text="GPU memory share for each worker",
+ value=0.1,
+ minval=0.0000001,
+ maxval=1,
+ doc="""\
+Fraction of the GPU memory share available to each worker. Value should be set such that this number times the number
+of workers in each copy of CellProfiler times the number of copies of CellProfiler running (if applicable) is <1
+""",
+ )
+
+ self.stitch_threshold = Float(
+ text="Stitch Threshold",
+ value=0.0,
+ minval=0,
+ doc=f"""\
+There may be additional differences in YZ and XZ slices that make them unable to be used for 3D segmentation.
+In those instances, you may want to turn off 3D segmentation (do_3D=False) and run instead with stitch_threshold>0.
+Cellpose will create masks in 2D on each XY slice and then stitch them across slices if the IoU between the mask on the current slice and the next slice is greater than or equal to the stitch_threshold.
+""",
+ )
+
+ self.min_size = Integer(
+ text="Minimum size",
+ value=3,
+ minval=-1,
+ doc="""\
+Minimum number of pixels per mask; this can be turned off by setting the value to -1.
+""",
+ )
+
+ def settings(self):
+ return [
+ self.x_name,
+ self.expected_diameter,
+ self.mode,
+ self.y_name,
+ self.use_gpu,
+ self.use_averaging,
+ self.supply_nuclei,
+ self.nuclei_image,
+ self.save_probabilities,
+ self.probabilities_name,
+ self.model_directory,
+ self.model_file_name,
+ self.flow_threshold,
+ self.mask_threshold,
+ self.manual_GPU_memory_share,
+ self.stitch_threshold,
+ self.do_3D,
+ self.min_size,
+ self.omni,
+ self.ncolor,
+ self.cluster,
+ self.invert,
+ self.verbose,
+ self.tile,
+ ]
+
+ def visible_settings(self):
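+ # Best-effort version gate (assumes version strings like "0.7.3" or "1.0.2"):
+ # the separate 'omni' toggle is only shown for cellpose releases in the
+ # 0.6.x-1.x range, matching the check below.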
+ if float(cellpose_ver[0:3]) >= 0.6 and int(cellpose_ver[0]) < 2:
+ vis_settings = [self.mode, self.omni, self.x_name]
+ else:
+ vis_settings = [self.mode, self.x_name]
+
+ if self.mode.value != "nuclei":
+ vis_settings += [self.supply_nuclei]
+ if self.supply_nuclei.value:
+ vis_settings += [self.nuclei_image]
+ if self.mode.value == "custom":
+ vis_settings += [
+ self.model_directory,
+ self.model_file_name,
+ ]
+
+ vis_settings += [
+ self.expected_diameter,
+ self.mask_threshold,
+ self.min_size,
+ self.flow_threshold,
+ self.y_name,
+ self.invert,
+ self.save_probabilities,
+ ]
+
+ vis_settings += [
+ self.do_3D,
+ self.stitch_threshold,
+ self.verbose,
+ self.tile,
+ self.cluster,
+ self.ncolor,
+ ]
+
+ if self.do_3D.value:
+ vis_settings.remove(self.stitch_threshold)
+
+ if self.save_probabilities.value:
+ vis_settings += [self.probabilities_name]
+
+ vis_settings += [self.use_averaging, self.use_gpu]
+
+ if self.use_gpu.value:
+ vis_settings += [self.gpu_test, self.manual_GPU_memory_share]
+
+ return vis_settings
+
+ def validate_module(self, pipeline):
+ """If using custom model, validate the model file opens and works"""
+ if self.mode.value == "custom":
+ model_file = self.model_file_name.value
+ model_directory = self.model_directory.get_absolute_path()
+ model_path = os.path.join(model_directory, model_file)
+ try:
+ open(model_path)
+ except:
+ raise ValidationError(
+ "Failed to load custom file: %s " % model_path,
+ self.model_file_name,
+ )
+ try:
+ model = models.CellposeModel(
+ pretrained_model=model_path, gpu=self.use_gpu.value
+ )
+ except:
+ raise ValidationError(
+ "Failed to load custom model: %s " % model_path,
+ self.model_file_name,
+ )
+
+ def run(self, workspace):
+ print("run")
+ print(self.mode.value)
+ if self.mode.value != "custom":
+ model = models.CellposeModel(
+ model_type=self.mode.value, # VS cellpose(), not source of bug tho
+ gpu=self.use_gpu.value,
+ )
+
+ else:
+ model_file = self.model_file_name.value
+ model_directory = self.model_directory.get_absolute_path()
+ model_path = os.path.join(model_directory, model_file)
+ model = models.CellposeModel(
+ pretrained_model=model_path, gpu=self.use_gpu.value
+ )
+
+ if self.use_gpu.value and model.torch:
+ from torch import cuda
+
+ cuda.set_per_process_memory_fraction(self.manual_GPU_memory_share.value)
+
+ x_name = self.x_name.value
+ y_name = self.y_name.value
+ images = workspace.image_set
+ x = images.get_image(x_name)
+ dimensions = x.dimensions
+ x_data = x.pixel_data
+ anisotropy = 0.0
+
+ if self.do_3D.value:
+ anisotropy = x.spacing[0] / x.spacing[1]
+
+ if x.multichannel:
+ raise ValueError(
+ "Color images are not currently supported. Please provide greyscale images."
+ )
+
+ if self.mode.value != "nuclei" and self.supply_nuclei.value:
+ nuc_image = images.get_image(self.nuclei_image.value)
+ # CellPose expects RGB, we'll have a blank red channel, cells in green and nuclei in blue.
+ if self.do_3D.value:
+ x_data = numpy.stack(
+ (numpy.zeros_like(x_data), x_data, nuc_image.pixel_data), axis=1
+ )
+
+ else:
+ x_data = numpy.stack(
+ (numpy.zeros_like(x_data), x_data, nuc_image.pixel_data), axis=-1
+ )
+
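+ # Cellpose's channels argument is [segmentation channel, nuclear channel];
+ # with the RGB stack built above, 2=green holds cells and 3=blue holds nuclei.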
+ channels = [2, 3]
+ else:
+ channels = [0, 0]
+
+ diam = (
+ self.expected_diameter.value if self.expected_diameter.value > 0 else None
+ )
+
+ try:
+ y_data, flows, *_ = model.eval(
+ x_data,
+ channels=channels,
+ diameter=diam,
+ net_avg=self.use_averaging.value,
+ do_3D=self.do_3D.value,
+ anisotropy=anisotropy,
+ flow_threshold=self.flow_threshold.value,
+ mask_threshold=self.mask_threshold.value,
+ stitch_threshold=self.stitch_threshold.value,
+ min_size=self.min_size.value,
+ omni=self.omni.value,
+ cluster=self.cluster.value,
+ invert=self.invert.value,
+ verbose=self.verbose.value,
+ tile=self.tile.value,
+ )
+
+ y = Objects()
+ y.segmented = y_data
+
+ except Exception as a:
+ print(f"Unable to create masks. Check your module settings. {a}")
+ # Re-raise so execution doesn't continue below with undefined results.
+ raise
+ finally:
+ if self.use_gpu.value and model.torch:
+ # Try to clear some GPU memory for other worker processes.
+ try:
+ cuda.empty_cache()
+ except Exception as e:
+ print(
+ f"Unable to clear GPU memory. You may need to restart CellProfiler to change models. {e}"
+ )
+
+ y.parent_image = x.parent_image
+ objects = workspace.object_set
+ objects.add_objects(y, y_name)
+
+ if self.save_probabilities.value:
+ # Flows come out sized relative to CellPose's inbuilt model size.
+ # We need to slightly resize to match the original image.
+ size_corrected = resize(flows[2], y_data.shape)
+ prob_image = Image(
+ size_corrected,
+ parent_image=x.parent_image,
+ convert=False,
+ dimensions=len(size_corrected.shape),
+ )
+
+ workspace.image_set.add(self.probabilities_name.value, prob_image)
+
+ if self.show_window:
+ workspace.display_data.probabilities = size_corrected
+
+ self.add_measurements(workspace)
+
+ if self.show_window:
+ if x.volumetric:
+ # Can't show CellPose-accepted colour images in 3D
+ workspace.display_data.x_data = x.pixel_data
+ else:
+ workspace.display_data.x_data = x_data
+ workspace.display_data.y_data = y_data
+ workspace.display_data.outlines = plot.outline_view(x_data, y_data)
+ workspace.display_data.flowsRGB = flows[0]
+ workspace.display_data.distance = flows[2]
+ workspace.display_data.boundary = flows[4]
+
+ workspace.display_data.dimensions = dimensions
+
+ def display(self, workspace, figure):
+ N = 5 # image, 2 network predictions, segmentation, outlines
+ if self.save_probabilities.value:
+ layout = (N, 2)
+ else:
+ layout = (N, 1)
+
+ figure.set_subplots(
+ dimensions=workspace.display_data.dimensions, subplots=layout
+ )
+ # show the image
+ col = 0
+ figure.subplot_imshow(
+ colormap="gray",
+ image=workspace.display_data.x_data,
+ title="Input Image",
+ x=col,
+ y=0,
+ )
+
+ # show the network outputs
+ col += 1
+ figure.subplot_imshow(
+ image=workspace.display_data.flowsRGB,
+ sharexy=figure.subplot(0, 0),
+ title="predicted flows",
+ x=col,
+ y=0,
+ )
+ col += 1
+ figure.subplot_imshow(
+ colormap="magma",
+ image=workspace.display_data.distance,
+ sharexy=figure.subplot(0, 0),
+ title="predicted distance",
+ x=col,
+ y=0,
+ )
+
+ # boundary logits output not really relevant
+ # col += 1
+ # figure.subplot_imshow(
+ # colormap='viridis',
+ # image=workspace.display_data.boundary,
+ # sharexy=figure.subplot(0, 0),
+ # title='predicted boundary logits',
+ # x=col,
+ # y=0,
+ # )
+
+ # show the segmentation (change to ncolor)
+ col += 1
+ m = workspace.display_data.y_data
+ from omnipose.utils import sinebow, rescale
+ from matplotlib.colors import ListedColormap
+
+ cmap = ListedColormap([color for color in list(sinebow(m.max()).values())[1:]])
+ pic = ncolor.label(m) if self.ncolor.value else m
+ pic = cmap(rescale(pic))
+ pic[:, :, -1] = m > 0 # alpha
+ figure.subplot_imshow(
+ image=pic,
+ sharexy=figure.subplot(0, 0),
+ title=self.y_name.value,
+ x=col,
+ y=0,
+ )
+
+ # show the outlines
+ col += 1
+ figure.subplot_imshow(
+ image=workspace.display_data.outlines,
+ sharexy=figure.subplot(0, 0),
+ title="outlines",
+ x=col,
+ y=0,
+ )
+
+ if self.save_probabilities.value:
+ figure.subplot_imshow(
+ colormap="gray",
+ image=workspace.display_data.probabilities,
+ sharexy=figure.subplot(0, 0),
+ title=self.probabilities_name.value,
+ x=0,
+ y=1,
+ )
+
+ def do_check_gpu(self):
+ print("do_check_gpu")
+ import importlib.util
+
+ torch_installed = importlib.util.find_spec("torch") is not None
+ # if the old version of cellpose <2.0, then use istorch kwarg
+ if float(cellpose_ver[0:3]) >= 0.7 and int(cellpose_ver[0]) < 2:
+ GPU_works = core.use_gpu(istorch=torch_installed)
+ else: # if new version of cellpose, use use_torch kwarg
+ GPU_works = core.use_gpu(use_torch=torch_installed)
+ if GPU_works:
+ message = "GPU appears to be working correctly!"
+ else:
+ message = (
+ "GPU test failed. There may be something wrong with your configuration."
+ )
+ import wx
+
+ wx.MessageBox(message, caption="GPU Test")
+
+ def upgrade_settings(self, setting_values, variable_revision_number, module_name):
+ print("upgrade_settings")
+ if variable_revision_number == 1:
+ setting_values = setting_values + ["0.4", "0.0"]
+ variable_revision_number = 2
+ if variable_revision_number == 2:
+ setting_values = setting_values + ["0.0", False, "15", "1.0", False, False]
+ variable_revision_number = 3
+ return setting_values, variable_revision_number
diff --git a/CP5/active_plugins/runstardist.py b/CP5/active_plugins/runstardist.py
new file mode 100644
index 00000000..cea418bf
--- /dev/null
+++ b/CP5/active_plugins/runstardist.py
@@ -0,0 +1,448 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import os
+from skimage.transform import resize
+from stardist.models import StarDist2D, StarDist3D
+from csbdeep.utils import normalize
+from numba import cuda
+import tensorflow as tf
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+from cellprofiler_core.image import Image
+from cellprofiler_core.module.image_segmentation import ImageSegmentation
+from cellprofiler_core.object import Objects
+from cellprofiler_core.setting import Binary
+from cellprofiler_core.setting.choice import Choice
+from cellprofiler_core.setting.do_something import DoSomething
+from cellprofiler_core.setting.text import Integer, ImageName, Directory, Float
+from csbdeep.models.pretrained import get_registered_models
+
+__doc__ = f"""\
+RunStardist
+===========
+
+**RunStarDist** uses a pre-trained machine learning model (StarDist) to detect cells or nuclei in an image.
+This module is useful for automating simple segmentation tasks in CellProfiler.
+The module takes in input images and produces an object set. Probabilities can also be captured as an image.
+
+Loading in a model will take slightly longer the first time you run it each session. When evaluating
+performance you may want to consider the time taken to predict subsequent images.
+
+Installation:
+This can be a little tricky because of some dependency issues. We need to take care to not break CellProfiler's
+components when adding stardist to the environment.
+
+You'll want to run `pip install --no-deps csbdeep` first to grab the csbdeep package without installing an invalid
+version of h5py (CellProfiler needs h5py 3+). Following this run `pip install tensorflow stardist` to install other
+dependencies.
+For Windows you need to install Microsoft C++ Redistributable for Visual Studio 2015, 2017 and 2019 from
+https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads
+
+If using the pre-trained models, StarDist will download each when first used.
+
+The models will automatically run on a GPU if compatible hardware is available and you have the required software.
+A guide to setting up Tensorflow GPU integration can be found at this link: https://www.tensorflow.org/install/gpu
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES YES NO
+============ ============ ===============
+
+"""
+
+# get available models
+_models2d, _aliases2d = get_registered_models(StarDist2D)
+_models3d, _aliases3d = get_registered_models(StarDist3D)
+
+# use first alias for model selection (if alias exists)
+models2d = [((_aliases2d[m][0] if len(_aliases2d[m]) > 0 else m), m) for m in _models2d]
+models3d = [((_aliases3d[m][0] if len(_aliases3d[m]) > 0 else m), m) for m in _models3d]
+
+
+CUSTOM_MODEL = "Custom 2D/3D"
+MODEL_OPTIONS = [("2D", StarDist2D), ("3D", StarDist3D), ("Custom 2D/3D", CUSTOM_MODEL)]
+
+GREY_1 = "Versatile (fluorescent nuclei)"
+GREY_2 = "DSB 2018 (from StarDist 2D paper)"
+COLOR_1 = "Versatile (H&E nuclei)"
+
+
+class RunStarDist(ImageSegmentation):
+ category = "Object Processing"
+
+ module_name = "RunStarDist"
+
+ variable_revision_number = 1
+
+ doi = {
+ "Please cite the following when using RunstarDist:": "https://doi.org/10.1007/978-3-030-00934-2_30",
+ "If you are using 3D also cite the following:": "https://doi.org/10.1109/WACV45572.2020.9093435",
+ }
+
+ def create_settings(self):
+ super(RunStarDist, self).create_settings()
+
+ self.model = Choice(
+ text="Model Type",
+ choices=list(zip(*MODEL_OPTIONS))[0],
+ value="2D",
+ doc="""\
+StarDist comes with models for detecting nuclei. Alternatively, you can supply a custom-trained model
+generated outside of CellProfiler within Python. Custom models can be useful if working with unusual cell types.
+""",
+ )
+
+ self.model_choice2D = Choice(
+ text="Model",
+ choices=list(zip(*models2d))[0],
+ value="Versatile (fluorescent nuclei)",
+ doc="""\
+The inbuilt fluorescent and DSB models expect greyscale images. The H&E model expects a color image as input (from
+brightfield). Custom models will require images of the type they were trained with.
+""",
+ )
+
+ self.model_choice3D = Choice(
+ text="Model",
+ choices=list(zip(*models3d))[0],
+ value="3D_demo",
+ doc="""\
+Select the pre-trained 3D model to use. Note that these are the 3D models supplied with StarDist.
+""",
+ )
+
+ self.tile_image = Binary(
+ text="Tile input image?",
+ value=False,
+ doc="""\
+If enabled, the input image will be broken down into overlapping tiles.
+This can help to conserve memory when working with large images.
+
+The image is split into a set number of vertical and horizontal tiles.
+The total number of tiles will be the result of multiplying the horizontal
+and vertical tile number.""",
+ )
+
+ self.n_tiles_x = Integer(
+ text="Horizontal tiles",
+ value=1,
+ minval=1,
+ doc="""\
+Specify the number of tiles to break the image down into along the x-axis (horizontal).""",
+ )
+
+ self.n_tiles_y = Integer(
+ text="Vertical tiles",
+ value=1,
+ minval=1,
+ doc="""\
+Specify the number of tiles to break the image down into along the y-axis (vertical).""",
+ )
+
+ self.save_probabilities = Binary(
+ text="Save probability image?",
+ value=False,
+ doc="""
+If enabled, the probability scores from the model will be recorded as a new image.
+Probability scales from 0-1, with 1 representing absolute certainty of a pixel being in a cell.
+You may want to use a custom threshold to manually generate objects.""",
+ )
+
+ self.probabilities_name = ImageName(
+ "Name the probability image",
+ "Probabilities",
+ doc="Enter the name you want to call the probability image produced by this module.",
+ )
+
+ self.model_directory = Directory(
+ "Model folder",
+ doc=f"""\
+*(Used only when using a custom pre-trained model)*
+
+Select the folder containing your StarDist model. This should have the config, threshold and weights files
+exported after training.""",
+ )
+
+ self.gpu_test = DoSomething(
+ "",
+ "Test GPU",
+ self.do_check_gpu,
+ doc=f"""\
+Press this button to check whether a GPU is correctly configured.
+
+If you have a dedicated GPU, a failed test usually means that either your GPU does not support deep learning or the
+required dependencies are not installed.
+Make sure you followed the setup instructions here: https://www.tensorflow.org/install/gpu
+
+If you don't have a GPU or it's not configured, StarDist will instead run on the CPU.
+This will be slower but should work on any system.
+""",
+ )
+ self.prob_thresh = Float(
+ text="Probability threshold",
+ value=0.5,
+ minval=0.0,
+ maxval=1.0,
+ doc="""\
+The probability threshold is the value used to determine which pixels are used for mask creation;
+all pixels with probability above the threshold are kept for masks.
+""",
+ )
+
+ self.nms_thresh = Float(
+ text="Overlap threshold",
+ value=0.4,
+ minval=0.0,
+ maxval=1.0,
+ doc=f"""\
+The non-maximum suppression threshold, used to prevent predicted objects from overlapping too heavily.
+""",
+ )
+
+ self.manage_gpu = Binary(
+ text="Manually set how much GPU memory each worker can use?",
+ value=False,
+ doc="""
+If enabled, you can manually set how much of the GPU memory each worker can use.
+This is likely to provide the most benefit on Macs. Do not use in a multi-GPU setup.""",
+ )
+
+ self.manual_GPU_memory_GB = Float(
+ text="GPU memory (in GB) for each worker",
+ value=0.5,
+ minval=0.0000001,
+ maxval=30,
+ doc="""\
+Gigabytes of GPU memory available to each worker. Value should be set such that this number times the number
+of workers in each copy of CellProfiler times the number of copies of CellProfiler running (if applicable) is less
+than your GPU's total memory. For example, with four workers and an 8 GB GPU, each worker's share should be below 2 GB.
+The "correct" value will depend on your system's GPU, the number of workers you want to run in parallel, and
+the size of the model that you want to run; some experimentation may be needed.
+""",
+ )
+
+ def settings(self):
+ return [
+ self.x_name,
+ self.model,
+ self.y_name,
+ self.tile_image,
+ self.n_tiles_x,
+ self.n_tiles_y,
+ self.save_probabilities,
+ self.probabilities_name,
+ self.model_directory,
+ self.model_choice2D,
+ self.model_choice3D,
+ self.prob_thresh,
+ self.nms_thresh,
+ self.manage_gpu,
+ self.manual_GPU_memory_GB,
+ ]
+
+ def visible_settings(self):
+ vis_settings = [
+ self.x_name,
+ self.model,
+ ]
+
+ if self.model.value == "2D":
+ vis_settings += [self.model_choice2D]
+
+ if self.model.value == "3D":
+ vis_settings += [self.model_choice3D]
+
+ if self.model.value == CUSTOM_MODEL:
+ vis_settings += [self.model_directory]
+
+ vis_settings += [self.y_name, self.save_probabilities]
+
+ if self.save_probabilities.value:
+ vis_settings += [self.probabilities_name]
+
+ vis_settings += [self.tile_image]
+ if self.tile_image.value:
+ vis_settings += [self.n_tiles_x, self.n_tiles_y]
+
+ vis_settings += [self.prob_thresh, self.nms_thresh, self.gpu_test, self.manage_gpu]
+
+ if self.manage_gpu.value:
+ vis_settings += [self.manual_GPU_memory_GB]
+
+ return vis_settings
+
+ def run(self, workspace):
+ images = workspace.image_set
+ x = images.get_image(self.x_name.value)
+ dimensions = x.dimensions
+ x_data = x.pixel_data
+ prob_thresh = self.prob_thresh.value
+ nms_thresh = self.nms_thresh.value
+
+ # Validate some settings
+ if self.model_choice2D.value in (GREY_1, GREY_2) and x.multichannel:
+ raise ValueError(
+ "Color images are not supported by this model. Please provide greyscale images."
+ )
+ elif self.model_choice2D.value == COLOR_1 and not x.multichannel:
+ raise ValueError(
+ "Greyscale images are not supported by this model. Please provide a color overlay."
+ )
+
+ # Stolen nearly wholesale from https://wiki.ncsa.illinois.edu/display/ISL20/Managing+GPU+memory+when+using+Tensorflow+and+Pytorch
+ if self.manage_gpu.value:
+ # First, Get a list of GPU devices
+ gpus = tf.config.list_physical_devices('GPU')
+ if len(gpus) > 0:
+ # Restrict to only the first GPU.
+ tf.config.set_visible_devices(gpus[:1], device_type='GPU')
+ # Create a LogicalDevice with the appropriate memory limit
+ log_dev_conf = tf.config.LogicalDeviceConfiguration(
+ memory_limit=self.manual_GPU_memory_GB.value*1024 # convert GB to MB
+ )
+ # Apply the logical device configuration to the first GPU
+ tf.config.set_logical_device_configuration(
+ gpus[0],
+ [log_dev_conf])
+
+
+ if self.model.value == CUSTOM_MODEL:
+ model_directory, model_name = os.path.split(
+ self.model_directory.get_absolute_path()
+ )
+ if x.volumetric:
+ from stardist.models import StarDist3D
+
+ model = StarDist3D(
+ config=None, basedir=model_directory, name=model_name
+ )
+ else:
+ model = StarDist2D(
+ config=None, basedir=model_directory, name=model_name
+ )
+ if self.model.value == "2D":
+ model = StarDist2D.from_pretrained(self.model_choice2D.value)
+
+ if self.model.value == "3D":
+ from stardist.models import StarDist3D
+
+ model = StarDist3D.from_pretrained(self.model_choice3D.value)
+
+ tiles = None
+ if self.tile_image.value:
+ tiles = []
+ if x.volumetric:
+ tiles += [1]
+ tiles += [self.n_tiles_x.value, self.n_tiles_y.value]
+ # Handle colour channels
+ tiles += [1] * max(0, x.pixel_data.ndim - len(tiles))
+ print(x.pixel_data.shape, x.pixel_data.ndim, tiles)
+
+ if not self.save_probabilities.value:
+ # Probabilities aren't wanted, things are simple
+ data = model.predict_instances(
+ normalize(x.pixel_data),
+ return_predict=False,
+ n_tiles=tiles,
+ prob_thresh=prob_thresh,
+ nms_thresh=nms_thresh,
+ )
+ y_data = data[0]
+ else:
+ data, probs = model.predict_instances(
+ normalize(x.pixel_data),
+ return_predict=True,
+ sparse=False,
+ n_tiles=tiles,
+ prob_thresh=prob_thresh,
+ nms_thresh=nms_thresh,
+ )
+ y_data = data[0]
+
+ # Scores aren't at the same resolution as the input image.
+ # We need to slightly resize to match the original image.
+ size_corrected = resize(probs[0], y_data.shape)
+ prob_image = Image(
+ size_corrected,
+ parent_image=x.parent_image,
+ convert=False,
+ dimensions=len(size_corrected.shape),
+ )
+
+ workspace.image_set.add(self.probabilities_name.value, prob_image)
+
+ if self.show_window:
+ workspace.display_data.probabilities = size_corrected
+
+ y = Objects()
+ y.segmented = y_data
+ y.parent_image = x.parent_image
+ objects = workspace.object_set
+ objects.add_objects(y, self.y_name.value)
+
+ self.add_measurements(workspace)
+ if cuda.is_available():
+ cuda.current_context().memory_manager.deallocations.clear()
+
+ if self.show_window:
+ workspace.display_data.x_data = x_data
+ workspace.display_data.y_data = y_data
+ workspace.display_data.dimensions = dimensions
+
+ def display(self, workspace, figure):
+ if self.save_probabilities.value:
+ layout = (2, 2)
+ else:
+ layout = (2, 1)
+
+ figure.set_subplots(
+ dimensions=workspace.display_data.dimensions, subplots=layout
+ )
+
+ figure.subplot_imshow(
+ colormap="gray",
+ image=workspace.display_data.x_data,
+ title="Input Image",
+ x=0,
+ y=0,
+ )
+
+ figure.subplot_imshow_labels(
+ image=workspace.display_data.y_data,
+ sharexy=figure.subplot(0, 0),
+ title=self.y_name.value,
+ x=1,
+ y=0,
+ )
+ if self.save_probabilities.value:
+ figure.subplot_imshow(
+ colormap="gray",
+ image=workspace.display_data.probabilities,
+ sharexy=figure.subplot(0, 0),
+ title=self.probabilities_name.value,
+ x=0,
+ y=1,
+ )
+
+ def do_check_gpu(self):
+ if len(tf.config.list_physical_devices("GPU")) > 0:
+ message = "GPU appears to be working correctly!"
+ print("GPUs:", tf.config.list_physical_devices("GPU"))
+ else:
+ message = (
+ "GPU test failed. There may be something wrong with your configuration."
+ )
+ import wx
+
+ wx.MessageBox(message, caption="GPU Test")
diff --git a/CP5/active_plugins/variancetransform.py b/CP5/active_plugins/variancetransform.py
new file mode 100644
index 00000000..55e06dd8
--- /dev/null
+++ b/CP5/active_plugins/variancetransform.py
@@ -0,0 +1,193 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+import logging
+import numpy
+import scipy.ndimage
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+
+import cellprofiler_core.setting
+import cellprofiler_core.module
+from cellprofiler_core.image import Image
+from cellprofiler_core.setting import Binary
+from cellprofiler_core.setting.subscriber import ImageSubscriber
+from cellprofiler_core.setting.text import ImageName, Integer
+
+__doc__ = """\
+VarianceTransform
+=================
+**VarianceTransform** allows you to calculate the variance of an image using a set window size. It also has
+the option to find the optimal window size to obtain the maximum variance of an image within a given range.
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES YES YES
+============ ============ ===============
+"""
+
+
+class VarianceTransform(cellprofiler_core.module.ImageProcessing):
+ module_name = "VarianceTransform"
+
+ variable_revision_number = 1
+
+ def create_settings(self):
+ self.image_name = ImageSubscriber(
+ "Select the input image",
+ "None",
+ doc="""Select the image to be smoothed.""",
+ )
+
+ self.output_image_name = ImageName(
+ "Name the output image",
+ "FilteredImage",
+ doc="""Enter a name for the resulting image.""",
+ )
+
+ self.calculate_maximal = Binary(
+ "Calculate optimal window size to maximize image variance?",
+ False,
+ doc="""\
+Select "*Yes*" to provide a range that will be used to obtain the window size that will generate
+the maximum variance in the input image.
+Select "*No*" to give the window size used to obtain the image variance.""",
+ )
+
+ self.window_size = Integer(
+ "Window size",
+ 5,
+ minval=1,
+ doc="""Enter the size of the window used to calculate the variance.""",
+ )
+
+ self.window_min = Integer(
+ "Window min",
+ 5,
+ minval=1,
+ doc="""Enter the minimum size of the window used to calculate the variance.""",
+ )
+
+ self.window_max = Integer(
+ "Window max",
+ 50,
+ minval=1,
+ doc="""Enter the maximum size of the window used to calculate the variance.""",
+ )
+
+ def settings(self):
+ return [
+ self.image_name,
+ self.output_image_name,
+ self.calculate_maximal,
+ self.window_size,
+ self.window_min,
+ self.window_max,
+ ]
+
+ def visible_settings(self):
+ __settings__ = [
+ self.image_name,
+ self.output_image_name,
+ ]
+ __settings__ += [
+ self.calculate_maximal,
+ ]
+ if not self.calculate_maximal.value:
+ __settings__ += [
+ self.window_size,
+ ]
+ else:
+ __settings__ += [
+ self.window_min,
+ self.window_max,
+ ]
+ return __settings__
+
+ def run(self, workspace):
+
+ image = workspace.image_set.get_image(
+ self.image_name.value, must_be_grayscale=True
+ )
+
+ image_pixels = image.pixel_data
+
+ window_range = range(self.window_min.value, self.window_max.value + 1) # inclusive of the maximum window size
+
+ size = self.window_size.value
+
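+ # Local variance is computed via the identity Var(X) = E[X^2] - (E[X])^2,
+ # where E[.] is a uniform (box) filter over the window; abs() guards against
+ # small negative values introduced by floating-point round-off.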
+ if self.calculate_maximal.value:
+ max_variance = -1
+ for window in window_range:
+ result = abs(
+ scipy.ndimage.uniform_filter(
+ image_pixels**2, size=window, output=numpy.float64
+ )
+ - (
+ scipy.ndimage.uniform_filter(
+ image_pixels, size=window, output=numpy.float64
+ )
+ ** 2
+ )
+ )
+ variance = result.max()
+ if variance > max_variance:
+ max_variance = variance
+ size = window
+
+ output_pixels = abs(
+ scipy.ndimage.uniform_filter(
+ image_pixels**2, size=size, output=numpy.float64
+ )
+ - (
+ scipy.ndimage.uniform_filter(
+ image_pixels, size=size, output=numpy.float64
+ )
+ ** 2
+ )
+ )
+
+ new_image = Image(
+ output_pixels, parent_image=image, dimensions=image.dimensions
+ )
+
+ workspace.image_set.add(self.output_image_name.value, new_image)
+
+ if self.show_window:
+ workspace.display_data.pixel_data = image_pixels
+
+ workspace.display_data.output_pixels = output_pixels
+
+ workspace.display_data.dimensions = image.dimensions
+
+ def display(self, workspace, figure):
+ layout = (2, 1)
+ figure.set_subplots(
+ dimensions=workspace.display_data.dimensions, subplots=layout
+ )
+
+ figure.subplot_imshow(
+ colormap="gray",
+ image=workspace.display_data.pixel_data,
+ title=self.image_name.value,
+ x=0,
+ y=0,
+ )
+
+ figure.subplot_imshow(
+ colormap="gray",
+ image=workspace.display_data.output_pixels,
+ title=self.output_image_name.value,
+ x=1,
+ y=0,
+ )
diff --git a/CP5/tests/__init__.py b/CP5/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/CP5/tests/conftest.py b/CP5/tests/conftest.py
new file mode 100644
index 00000000..670e1e33
--- /dev/null
+++ b/CP5/tests/conftest.py
@@ -0,0 +1,161 @@
+# Something in CellProfiler is importing wx before we can set
+# headless mode. Setting headless here before importing anything
+# else from CellProfiler.
+import cellprofiler_core.preferences
+
+cellprofiler_core.preferences.set_headless()
+
+import cellprofiler_core.image
+import cellprofiler_core.measurement
+import cellprofiler_core.object
+import cellprofiler_core.pipeline
+import cellprofiler_core.workspace
+import cellprofiler_core.reader
+import numpy
+import skimage.data
+import skimage.color
+import skimage.filters
+import skimage.measure
+import pytest
+
+
+def pytest_sessionstart(session):
+ cellprofiler_core.reader.fill_readers(check_config=True)
+
+@pytest.fixture(
+ scope="module",
+ params=[
+ (skimage.data.camera()[0:128, 0:128], 2),
+ (skimage.data.astronaut()[0:128, 0:128, :], 2),
+ (numpy.tile(skimage.data.camera()[0:32, 0:32], (2, 1)).reshape(2, 32, 32), 3)
+ ],
+ ids=[
+ "grayscale_image",
+ "multichannel_image",
+ "grayscale_volume"
+ ]
+)
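+# Note: the "grayscale_volume" param tiles two copies of a 32x32 crop into a
+# 2-slice (z, y, x) stack so fixtures can exercise 3D code paths.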
+def image(request):
+ data, dimensions = request.param
+
+ return cellprofiler_core.image.Image(image=data, dimensions=dimensions)
+
+
+@pytest.fixture(scope="function")
+def image_empty():
+ image = cellprofiler_core.image.Image()
+
+ return image
+
+
+@pytest.fixture(scope="function")
+def image_set(image, image_set_list):
+ image_set = image_set_list.get_image_set(0)
+
+ image_set.add("example", image)
+
+ return image_set
+
+
+@pytest.fixture(scope="function")
+def image_set_empty(image_empty, image_set_list):
+ image_set = image_set_list.get_image_set(0)
+ image_set.add("example", image_empty)
+
+ return image_set
+
+
+@pytest.fixture(scope="function")
+def image_set_list():
+ return cellprofiler_core.image.ImageSetList()
+
+
+@pytest.fixture(scope="function")
+def measurements():
+ return cellprofiler_core.measurement.Measurements()
+
+
+@pytest.fixture(scope="function")
+def module(request):
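+ # Assumes each test module defines a module-level name "instance" holding the
+ # plugin class under test, e.g. `instance = VarianceTransform` (illustrative).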
+ instance = getattr(request.module, "instance")
+
+ return instance()
+
+
+@pytest.fixture(scope="function")
+def objects(image):
+ obj = cellprofiler_core.object.Objects()
+ obj.parent_image = image
+
+ return obj
+
+
+@pytest.fixture(scope="function")
+def objects_empty():
+ obj = cellprofiler_core.object.Objects()
+
+ return obj
+
+
+@pytest.fixture(scope="function")
+def object_set(objects):
+ objects_set = cellprofiler_core.object.ObjectSet()
+ objects_set.add_objects(objects, "InputObjects")
+
+ return objects_set
+
+
+@pytest.fixture(scope="function")
+def object_set_empty(objects_empty):
+ objects_set = cellprofiler_core.object.ObjectSet()
+ objects_set.add_objects(objects_empty, "InputObjects")
+
+ return objects_set
+
+
+@pytest.fixture(scope="function")
+def object_with_data(image):
+ data = image.pixel_data
+
+ if image.multichannel:
+ data = skimage.color.rgb2gray(data)
+
+ binary = data > skimage.filters.threshold_li(data)
+
+ labels = skimage.measure.label(binary)
+
+ objects = cellprofiler_core.object.Objects()
+
+ objects.segmented = labels
+ objects.parent_image = image
+
+ return objects
+
+
+@pytest.fixture(scope="function")
+def object_set_with_data(object_with_data):
+ objects_set = cellprofiler_core.object.ObjectSet()
+ objects_set.add_objects(object_with_data, "InputObjects")
+
+ return objects_set
+
+
+@pytest.fixture(scope="function")
+def pipeline():
+ return cellprofiler_core.pipeline.Pipeline()
+
+
+@pytest.fixture(scope="function")
+def workspace(pipeline, module, image_set, object_set, measurements, image_set_list):
+ return cellprofiler_core.workspace.Workspace(pipeline, module, image_set, object_set, measurements, image_set_list)
+
+
+@pytest.fixture(scope="function")
+def workspace_empty(pipeline, module, image_set_empty, object_set_empty, measurements, image_set_list):
+ return cellprofiler_core.workspace.Workspace(pipeline, module, image_set_empty, object_set_empty, measurements, image_set_list)
+
+
+@pytest.fixture(scope="function")
+def workspace_with_data(pipeline, module, image_set, object_set_with_data, measurements, image_set_list):
+ return cellprofiler_core.workspace.Workspace(pipeline, module, image_set, object_set_with_data,
+ measurements, image_set_list)
diff --git a/CP5/tests/headless_test/4.2.5_plugins_test_pipeline_BASIC.cppipe b/CP5/tests/headless_test/4.2.5_plugins_test_pipeline_BASIC.cppipe
new file mode 100644
index 00000000..53b9e2c7
--- /dev/null
+++ b/CP5/tests/headless_test/4.2.5_plugins_test_pipeline_BASIC.cppipe
@@ -0,0 +1,158 @@
+CellProfiler Pipeline: http://www.cellprofiler.org
+Version:5
+DateRevision:425
+GitHash:
+ModuleCount:11
+HasImagePlaneDetails:False
+
+Images:[module_num:1|svn_version:'Unknown'|variable_revision_number:2|show_window:True|notes:['To begin creating your project, use the Images module to compile a list of files and/or folders that you want to analyze. You can also specify a set of rules to include only the desired files in your selected folders.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ :
+ Filter images?:Images only
+ Select the rule criteria:and (extension does isimage) (directory doesnot containregexp "[\\\\/]\\.")
+
+Metadata:[module_num:2|svn_version:'Unknown'|variable_revision_number:6|show_window:True|notes:['The Metadata module optionally allows you to extract information describing your images (i.e, metadata) which will be stored along with your measurements. This information can be contained in the file name and/or location, or in an external file.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Extract metadata?:No
+ Metadata data type:Text
+ Metadata types:{}
+ Extraction method count:1
+ Metadata extraction method:Extract from file/folder names
+ Metadata source:File name
+ Regular expression to extract from file name:^(?P<Plate>.*)_(?P<Well>[A-P][0-9]{2})_s(?P<Site>[0-9])_w(?P<ChannelNumber>[0-9])
+ Regular expression to extract from folder name:(?P<Date>[0-9]{4}_[0-9]{2}_[0-9]{2})$
+ Extract metadata from:All images
+ Select the filtering criteria:and (file does contain "")
+ Metadata file location:Elsewhere...|
+ Match file and image metadata:[]
+ Use case insensitive matching?:No
+ Metadata file name:None
+ Does cached metadata exist?:No
+
+NamesAndTypes:[module_num:3|svn_version:'Unknown'|variable_revision_number:8|show_window:True|notes:['The NamesAndTypes module allows you to assign a meaningful name to each image by which other modules will refer to it.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Assign a name to:All images
+ Select the image type:Grayscale image
+ Name to assign these images:DNA
+ Match metadata:[]
+ Image set matching method:Order
+ Set intensity range from:Image metadata
+ Assignments count:1
+ Single images count:0
+ Maximum intensity:255.0
+ Process as 3D?:No
+ Relative pixel spacing in X:1.0
+ Relative pixel spacing in Y:1.0
+ Relative pixel spacing in Z:1.0
+ Select the rule criteria:and (file does contain "")
+ Name to assign these images:DNA
+ Name to assign these objects:Cell
+ Select the image type:Grayscale image
+ Set intensity range from:Image metadata
+ Maximum intensity:255.0
+
+Groups:[module_num:4|svn_version:'Unknown'|variable_revision_number:2|show_window:True|notes:['The Groups module optionally allows you to split your list of images into image subsets (groups) which will be processed independently of each other. Examples of groupings include screening batches, microtiter plates, time-lapse movies, etc.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Do you want to group your images?:No
+ grouping metadata count:1
+ Metadata category:None
+
+IdentifyPrimaryObjects:[module_num:5|svn_version:'Unknown'|variable_revision_number:15|show_window:True|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Select the input image:DNA
+ Name the primary objects to be identified:IdentifyPrimaryObjects
+ Typical diameter of objects, in pixel units (Min,Max):10,40
+ Discard objects outside the diameter range?:Yes
+ Discard objects touching the border of the image?:Yes
+ Method to distinguish clumped objects:Intensity
+ Method to draw dividing lines between clumped objects:Intensity
+ Size of smoothing filter:10
+ Suppress local maxima that are closer than this minimum allowed distance:7.0
+ Speed up by using lower-resolution image to find local maxima?:Yes
+ Fill holes in identified objects?:After both thresholding and declumping
+ Automatically calculate size of smoothing filter for declumping?:Yes
+ Automatically calculate minimum allowed distance between local maxima?:Yes
+ Handling of objects if excessive number of objects identified:Continue
+ Maximum number of objects:500
+ Use advanced settings?:No
+ Threshold setting version:12
+ Threshold strategy:Global
+ Thresholding method:Minimum Cross-Entropy
+ Threshold smoothing scale:1.3488
+ Threshold correction factor:1.0
+ Lower and upper bounds on threshold:0.0,1.0
+ Manual threshold:0.0
+ Select the measurement to threshold with:None
+ Two-class or three-class thresholding?:Two classes
+ Log transform before thresholding?:No
+ Assign pixels in the middle intensity class to the foreground or the background?:Foreground
+ Size of adaptive window:50
+ Lower outlier fraction:0.05
+ Upper outlier fraction:0.05
+ Averaging method:Mean
+ Variance method:Standard deviation
+ # of deviations:2.0
+ Thresholding method:Minimum Cross-Entropy
+
+CompensateColors:[module_num:6|svn_version:'Unknown'|variable_revision_number:1|show_window:True|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Hidden:2
+ Hidden:1
+ Select an image to measure:DNA
+ What compensation class does this image belong to?:1
+ Select an output image name:None
+ Select an image to measure:DNA
+ What compensation class does this image belong to?:1
+ Select an output image name:None
+ Select where to perform color compensation:Across entire image
+ Select an object to perform compensation within:None
+ Should individual images be rescaled 0-1 before compensating pre-masking or on unmasked images?:No
+ Should images be rescaled 0-1 before compensating but after masking to objects?:No
+ Should histogram matching be performed between the image groups?:No
+ What compensation class should serve as the template histogram?:1
+ Should images be rescaled 0-1 after compensating?:No
+ Should the images be divided by a scalar based on group percentiles:No
+ What percentile should be used for multiplication:99.0
+ Should the images have a tophat filter applied before correction?:No
+ What size radius should be used for the tophat filter?:3
+ Should the images have a Laplacian of Gaussian filter applied before correction?:No
+ What size radius should be used for the LoG filter?:1
+ Should the images have a Difference of Gaussians filter applied before correction?:No
+ What size sigma should be used for the DoG low sigma?:3
+ What size radius should be used for the DoG low sigma?:5
+
+DistanceTransform:[module_num:7|svn_version:'Unknown'|variable_revision_number:1|show_window:True|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Select the input image:DNA
+ Name the output image:DistanceTransform
+ Rescale values from 0 to 1?:Yes
+
+EnhancedMeasureTexture:[module_num:8|svn_version:'Unknown'|variable_revision_number:3|show_window:True|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Hidden:1
+ Hidden:1
+ Hidden:1
+ Select an image to measure:DNA
+ Select objects to measure:IdentifyPrimaryObjects
+ Texture scale to measure:3
+ Angles to measure:Horizontal,Vertical,Diagonal,Anti-diagonal
+ Measure Gabor features?:Yes
+ Number of angles to compute for Gabor:4
+ Measure Tamura features?:Yes
+ Features to compute:Coarseness,Contrast,Directionality
+
+HistogramEqualization:[module_num:9|svn_version:'Unknown'|variable_revision_number:1|show_window:True|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Select the input image:DNA
+ Name the output image:HistogramEqualization
+ Bins:256
+ Mask:Leave blank
+ Local:Yes
+ Tile Size:100
+ Clip limit:0.01
+ Is you image 3D?:No
+ Do framewise calculation?:No
+ Tile Size (Z):5
+
+HistogramMatching:[module_num:10|svn_version:'Unknown'|variable_revision_number:1|show_window:True|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Select the input image:DNA
+ Name the output image:HistogramMatching
+ Is you image 3D?:No
+ Use a frame within image as reference?:No
+ Image to use as reference :None
+ Frame number:5
+
+PixelShuffle:[module_num:11|svn_version:'Unknown'|variable_revision_number:1|show_window:True|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Select the input image:DNA
+ Name the output image:PixelShuffle
diff --git a/CP5/tests/headless_test/4.2.5_plugins_test_pipeline_CELLPOSE.cppipe b/CP5/tests/headless_test/4.2.5_plugins_test_pipeline_CELLPOSE.cppipe
new file mode 100644
index 00000000..aa6e5b26
--- /dev/null
+++ b/CP5/tests/headless_test/4.2.5_plugins_test_pipeline_CELLPOSE.cppipe
@@ -0,0 +1,77 @@
+CellProfiler Pipeline: http://www.cellprofiler.org
+Version:5
+DateRevision:425
+GitHash:
+ModuleCount:5
+HasImagePlaneDetails:False
+
+Images:[module_num:1|svn_version:'Unknown'|variable_revision_number:2|show_window:True|notes:['To begin creating your project, use the Images module to compile a list of files and/or folders that you want to analyze. You can also specify a set of rules to include only the desired files in your selected folders.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ :
+ Filter images?:Images only
+ Select the rule criteria:and (extension does isimage) (directory doesnot containregexp "[\\\\/]\\.")
+
+Metadata:[module_num:2|svn_version:'Unknown'|variable_revision_number:6|show_window:True|notes:['The Metadata module optionally allows you to extract information describing your images (i.e, metadata) which will be stored along with your measurements. This information can be contained in the file name and/or location, or in an external file.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Extract metadata?:No
+ Metadata data type:Text
+ Metadata types:{}
+ Extraction method count:1
+ Metadata extraction method:Extract from file/folder names
+ Metadata source:File name
+ Regular expression to extract from file name:^(?P<Plate>.*)_(?P<Well>[A-P][0-9]{2})_s(?P<Site>[0-9])_w(?P<ChannelNumber>[0-9])
+ Regular expression to extract from folder name:(?P<Date>[0-9]{4}_[0-9]{2}_[0-9]{2})$
+ Extract metadata from:All images
+ Select the filtering criteria:and (file does contain "")
+ Metadata file location:Elsewhere...|
+ Match file and image metadata:[]
+ Use case insensitive matching?:No
+ Metadata file name:None
+ Does cached metadata exist?:No
+
+NamesAndTypes:[module_num:3|svn_version:'Unknown'|variable_revision_number:8|show_window:True|notes:['The NamesAndTypes module allows you to assign a meaningful name to each image by which other modules will refer to it.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Assign a name to:All images
+ Select the image type:Grayscale image
+ Name to assign these images:DNA
+ Match metadata:[]
+ Image set matching method:Order
+ Set intensity range from:Image metadata
+ Assignments count:1
+ Single images count:0
+ Maximum intensity:255.0
+ Process as 3D?:No
+ Relative pixel spacing in X:1.0
+ Relative pixel spacing in Y:1.0
+ Relative pixel spacing in Z:1.0
+ Select the rule criteria:and (file does contain "")
+ Name to assign these images:DNA
+ Name to assign these objects:Cell
+ Select the image type:Grayscale image
+ Set intensity range from:Image metadata
+ Maximum intensity:255.0
+
+Groups:[module_num:4|svn_version:'Unknown'|variable_revision_number:2|show_window:True|notes:['The Groups module optionally allows you to split your list of images into image subsets (groups) which will be processed independently of each other. Examples of groupings include screening batches, microtiter plates, time-lapse movies, etc.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Do you want to group your images?:No
+ grouping metadata count:1
+ Metadata category:None
+
+RunCellpose:[module_num:5|svn_version:'Unknown'|variable_revision_number:3|show_window:False|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Select the input image:DNA
+ Expected object diameter:12
+ Detection mode:nuclei
+ Name the output object:Nuclei_pose
+ Use GPU:No
+ Use averaging:Yes
+ Supply nuclei image as well?:No
+ Select the nuclei image:None
+ Save probability image?:No
+ Name the probability image:Probabilities
+ Location of the pre-trained model file:Elsewhere...|
+ Pre-trained model file name:cyto_0
+ Flow threshold:0.4
+ Cell probability threshold:0.0
+ GPU memory share for each worker:0.1
+ Stitch Threshold:0.0
+ Use 3D:No
+ Minimum size:10
+ Z rescaling factor (anisotropy):1.0
+ Use Omnipose for mask reconstruction:No
+ Invert images:No
\ No newline at end of file
diff --git a/CP5/tests/headless_test/4.2.5_plugins_test_pipeline_STARDIST.cppipe b/CP5/tests/headless_test/4.2.5_plugins_test_pipeline_STARDIST.cppipe
new file mode 100644
index 00000000..2284d9f1
--- /dev/null
+++ b/CP5/tests/headless_test/4.2.5_plugins_test_pipeline_STARDIST.cppipe
@@ -0,0 +1,65 @@
+CellProfiler Pipeline: http://www.cellprofiler.org
+Version:5
+DateRevision:425
+GitHash:
+ModuleCount:5
+HasImagePlaneDetails:False
+
+Images:[module_num:1|svn_version:'Unknown'|variable_revision_number:2|show_window:True|notes:['To begin creating your project, use the Images module to compile a list of files and/or folders that you want to analyze. You can also specify a set of rules to include only the desired files in your selected folders.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ :
+ Filter images?:Images only
+ Select the rule criteria:and (extension does isimage) (directory doesnot containregexp "[\\\\/]\\.")
+
+Metadata:[module_num:2|svn_version:'Unknown'|variable_revision_number:6|show_window:True|notes:['The Metadata module optionally allows you to extract information describing your images (i.e, metadata) which will be stored along with your measurements. This information can be contained in the file name and/or location, or in an external file.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Extract metadata?:No
+ Metadata data type:Text
+ Metadata types:{}
+ Extraction method count:1
+ Metadata extraction method:Extract from file/folder names
+ Metadata source:File name
+ Regular expression to extract from file name:^(?P<Plate>.*)_(?P<Well>[A-P][0-9]{2})_s(?P<Site>[0-9])_w(?P<ChannelNumber>[0-9])
+ Regular expression to extract from folder name:(?P<Date>[0-9]{4}_[0-9]{2}_[0-9]{2})$
+ Extract metadata from:All images
+ Select the filtering criteria:and (file does contain "")
+ Metadata file location:Elsewhere...|
+ Match file and image metadata:[]
+ Use case insensitive matching?:No
+ Metadata file name:None
+ Does cached metadata exist?:No
+
+NamesAndTypes:[module_num:3|svn_version:'Unknown'|variable_revision_number:8|show_window:True|notes:['The NamesAndTypes module allows you to assign a meaningful name to each image by which other modules will refer to it.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Assign a name to:All images
+ Select the image type:Grayscale image
+ Name to assign these images:DNA
+ Match metadata:[]
+ Image set matching method:Order
+ Set intensity range from:Image metadata
+ Assignments count:1
+ Single images count:0
+ Maximum intensity:255.0
+ Process as 3D?:No
+ Relative pixel spacing in X:1.0
+ Relative pixel spacing in Y:1.0
+ Relative pixel spacing in Z:1.0
+ Select the rule criteria:and (file does contain "")
+ Name to assign these images:DNA
+ Name to assign these objects:Cell
+ Select the image type:Grayscale image
+ Set intensity range from:Image metadata
+ Maximum intensity:255.0
+
+Groups:[module_num:4|svn_version:'Unknown'|variable_revision_number:2|show_window:True|notes:['The Groups module optionally allows you to split your list of images into image subsets (groups) which will be processed independently of each other. Examples of groupings include screening batches, microtiter plates, time-lapse movies, etc.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Do you want to group your images?:No
+ grouping metadata count:1
+ Metadata category:None
+
+RunStarDist:[module_num:5|svn_version:'Unknown'|variable_revision_number:1|show_window:True|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False]
+ Select the input image:DNA
+ Model:2D
+ Name the output object:RunStarDist
+ Tile input image?:No
+ Horizontal tiles:1
+ Vertical tiles:1
+ Save probability image?:Yes
+ Name the probability image:Probabilities
+ Model folder:Elsewhere...|
\ No newline at end of file
diff --git a/CP5/tests/headless_test/test_pipeline_img/skimage-mitosis-img.tiff b/CP5/tests/headless_test/test_pipeline_img/skimage-mitosis-img.tiff
new file mode 100644
index 00000000..27f06b08
Binary files /dev/null and b/CP5/tests/headless_test/test_pipeline_img/skimage-mitosis-img.tiff differ
diff --git a/CP5/tests/headless_test/test_run.sh b/CP5/tests/headless_test/test_run.sh
new file mode 100644
index 00000000..bbb267b3
--- /dev/null
+++ b/CP5/tests/headless_test/test_run.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
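+# Usage: test_run.sh <success-pattern> <logfile>
+# Greps the log case-insensitively (extended regex) for the pattern.
+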
+if grep -qiE "$1" "$2"
+then
+ echo "Pipeline ran successfully"
+ exit 0
+else
+ echo "Failed to run pipeline ($1 failed)"
+ exit 1
+fi
+
diff --git a/CP5/tests/resources/callbarcodes_Barcodes.csv b/CP5/tests/resources/callbarcodes_Barcodes.csv
new file mode 100644
index 00000000..b6eb8df9
--- /dev/null
+++ b/CP5/tests/resources/callbarcodes_Barcodes.csv
@@ -0,0 +1,6 @@
+gene_symbol,sgRNA
+ABCF1,GCAACACATCAATGTTGGGA
+ADAR,TTCTTGTAGGGTGAACACCG
+SLC25A6,CGAAGTTGAGGGCTTGAGTG
+ANXA2,GGTCCTTCTCTGGTAGGCGA
+nontargeting,CGCAATCCCTTAGGATAGCC
\ No newline at end of file
diff --git a/CP5/tests/resources/runimagejscript_dummyscript.py b/CP5/tests/resources/runimagejscript_dummyscript.py
new file mode 100644
index 00000000..573e894e
--- /dev/null
+++ b/CP5/tests/resources/runimagejscript_dummyscript.py
@@ -0,0 +1,4 @@
+#@ ImgPlus image
+#@output ImgPlus copy
+
+copy = image
diff --git a/CP5/tests/test_histogramequalization.py b/CP5/tests/test_histogramequalization.py
new file mode 100644
index 00000000..ce54671c
--- /dev/null
+++ b/CP5/tests/test_histogramequalization.py
@@ -0,0 +1,140 @@
+import cellprofiler_core.image
+import numpy
+import numpy.testing
+import skimage.exposure
+
+import histogramequalization
+
+instance = histogramequalization.HistogramEqualization
+
+
+def test_run(image, image_set, module, workspace):
+ module.x_name.value = "example"
+
+ module.y_name.value = "HistogramEqualization"
+
+ module.nbins.value = 256
+
+ module.mask.value = "Leave blank"
+
+ module.local.value = False
+
+ module.run(workspace)
+
+ actual = image_set.get_image("HistogramEqualization")
+
+ data = image.pixel_data
+
+ expected_data = skimage.exposure.equalize_hist(data)
+
+ expected = cellprofiler_core.image.Image(
+ image=expected_data,
+ parent_image=image,
+ dimensions=image.dimensions
+ )
+
+ numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data)
+
+
+def test_run_local(image, image_set, module, workspace):
+ module.x_name.value = "example"
+
+ module.y_name.value = "HistogramEqualization"
+
+ module.nbins.value = 256
+
+ module.local.value = True
+
+ module.run(workspace)
+
+ actual = image_set.get_image("HistogramEqualization")
+
+ data = image.pixel_data
+
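+ # For volumes the expected CLAHE result is computed plane-by-plane;
+ # 2D images get a single equalize_adapthist call.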
+ if image.volumetric:
+ expected_data = numpy.zeros_like(data)
+
+ for index, plane in enumerate(data):
+ expected_data[index] = skimage.exposure.equalize_adapthist(plane, kernel_size=256)
+ else:
+ expected_data = skimage.exposure.equalize_adapthist(data, kernel_size=256)
+
+ expected = cellprofiler_core.image.Image(
+ image=expected_data,
+ parent_image=image,
+ dimensions=image.dimensions
+ )
+
+ numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data)
+
+
+def test_run_nbins(image, image_set, module, workspace):
+ module.x_name.value = "example"
+
+ module.y_name.value = "HistogramEqualization"
+
+ module.nbins.value = 128
+
+ module.local.value = False
+
+ module.mask.value = "Leave blank"
+
+ module.run(workspace)
+
+ actual = image_set.get_image("HistogramEqualization")
+
+ data = image.pixel_data
+
+ expected_data = skimage.exposure.equalize_hist(data, nbins=128)
+
+ expected = cellprofiler_core.image.Image(
+ image=expected_data,
+ parent_image=image,
+ dimensions=image.dimensions
+ )
+
+ numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data)
+
+
+def test_run_mask(image, image_set, module, workspace):
+ data = image.pixel_data
+
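+ # Build a boolean mask that keeps everything except a 5-pixel border,
+ # shaped to match 2D, multichannel, or volumetric input.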
+ mask_data = numpy.zeros_like(data, dtype="bool")
+
+ if image.multichannel:
+ mask_data[5:-5, 5:-5, :] = True
+ elif image.dimensions == 3:
+ mask_data[:, 5:-5, 5:-5] = True
+ else:
+ mask_data[5:-5, 5:-5] = True
+
+ mask = cellprofiler_core.image.Image(
+ image=mask_data,
+ dimensions=image.dimensions
+ )
+
+ image_set.add("Mask", mask)
+
+ module.x_name.value = "example"
+
+ module.y_name.value = "HistogramEqualization"
+
+ module.nbins.value = 256
+
+ module.local.value = False
+
+ module.mask.value = "Mask"
+
+ module.run(workspace)
+
+ actual = image_set.get_image("HistogramEqualization")
+
+ expected_data = skimage.exposure.equalize_hist(data, mask=mask_data)
+
+ expected = cellprofiler_core.image.Image(
+ image=expected_data,
+ parent_image=image,
+ dimensions=image.dimensions
+ )
+
+ numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data)
diff --git a/CP5/tests/test_runimagejscript.py b/CP5/tests/test_runimagejscript.py
new file mode 100644
index 00000000..8fbc7d4e
--- /dev/null
+++ b/CP5/tests/test_runimagejscript.py
@@ -0,0 +1,91 @@
+import numpy
+
+import cellprofiler_core.image
+import cellprofiler_core.measurement
+
+import cellprofiler_core.setting.subscriber
+import cellprofiler_core.setting.text.alphanumeric
+
+from cellprofiler_core.setting.text import Directory, Filename
+
+
+import cellprofiler_core.object
+import cellprofiler_core.pipeline
+import cellprofiler_core.workspace
+
+import cellprofiler.modules.runimagejscript
+
+
+def make_workspace():
+ """Return a workspace with the given image and the runimagejscript module"""
+ pipeline = cellprofiler_core.pipeline.Pipeline()
+
+ module = cellprofiler.modules.runimagejscript.RunImageJScript()
+ module.set_module_num(1)
+ image_set_list = cellprofiler_core.image.ImageSetList()
+ image_set = image_set_list.get_image_set(0)
+
+ object_set = cellprofiler_core.object.ObjectSet()
+
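+ # Fail the test immediately if the pipeline reports a RunException.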
+ def callback(caller, event):
+ assert not isinstance(event, cellprofiler_core.pipeline.event.RunException)
+
+ pipeline.add_listener(callback)
+ pipeline.add_module(module)
+ m = cellprofiler_core.measurement.Measurements()
+
+ workspace = cellprofiler_core.workspace.Workspace(
+ pipeline, module, image_set, object_set, m, image_set_list
+ )
+
+ return module, workspace
+
+
+def test_start_image_j():
+ module, workspace = make_workspace()
+ module.init_pyimagej()
+ module.close_pyimagej()
+
+
+def test_parse_parameters():
+ module, workspace = make_workspace()
+
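+ # The dummy script declares one ImgPlus input ("image") and one ImgPlus
+ # output ("copy"); parsing should surface both as module settings.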
+ module.script_directory = Directory("Script directory")
+ module.script_file = Filename(
+ "ImageJ Script", "./../resources/runimagejscript_dummyscript.py")
+ module.get_parameters_from_script()
+
+ assert len(module.script_parameter_list) > 0
+
+ assert module.script_parameter_list[0].name.value == "image"
+ assert module.script_parameter_list[1].name.value == "copy"
+
+ assert isinstance(module.script_parameter_list[0].setting, cellprofiler_core.setting.subscriber.ImageSubscriber)
+ assert isinstance(module.script_parameter_list[1].setting, cellprofiler_core.setting.text.ImageName)
+
+
+def test_copy_image():
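+ # A 10x10 float gradient image; the dummy script should return it unchanged.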
+ x, y = numpy.mgrid[0:10, 0:10]
+ input_image = (x / 100.0 + y / 10.0).astype(numpy.float32)
+
+ module, workspace = make_workspace()
+
+ module.script_directory = Directory("Script directory")
+ module.script_file = Filename(
+ "ImageJ Script", "./../resources/runimagejscript_dummyscript.py")
+ module.get_parameters_from_script()
+
+ workspace.image_set.add("None", cellprofiler_core.image.Image(input_image))
+
+ module.run(workspace)
+
+ output_image = workspace.image_set.get_image("copy")
+
+ assert numpy.all(output_image.pixel_data == input_image)
\ No newline at end of file