diff --git a/src/imcflibs/imagej/bioformats.py b/src/imcflibs/imagej/bioformats.py index 9767b54d..758cc072 100644 --- a/src/imcflibs/imagej/bioformats.py +++ b/src/imcflibs/imagej/bioformats.py @@ -289,3 +289,243 @@ def write_bf_memoryfile(path_to_file): reader = Memoizer(ImageReader()) reader.setId(path_to_file) reader.close() + + +def get_metadata_from_file(path_to_image): + """Extract metadata from an image file using Bio-Formats. + + This function reads an image file using the Bio-Formats library and extracts + various metadata properties including physical dimensions, pixel dimensions, + and other image characteristics. + + Parameters + ---------- + path_to_image : str or pathlib.Path + Path to the image file from which metadata should be extracted. + + Returns + ------- + dict + A dictionary containing the following metadata: + + { + unit_width : float, # physical width of a pixel + unit_height : float, # physical height of a pixel + unit_depth : float, # physical depth of a voxel + pixel_width : int, # width of the image in pixels + pixel_height : int, # height of the image in pixels + slice_count : int, # number of Z-slices + channel_count : int, # number of channels + timepoints_count : int, # number of timepoints + dimension_order : str, # order of dimensions, e.g. 
"XYZCT" + pixel_type : str, # data type of the pixel values + } + """ + reader = ImageReader() + ome_meta = MetadataTools.createOMEXMLMetadata() + reader.setMetadataStore(ome_meta) + reader.setId(str(path_to_image)) + + phys_size_x = ome_meta.getPixelsPhysicalSizeX(0) + phys_size_y = ome_meta.getPixelsPhysicalSizeY(0) + phys_size_z = ome_meta.getPixelsPhysicalSizeZ(0) + pixel_size_x = ome_meta.getPixelsSizeX(0) + pixel_size_y = ome_meta.getPixelsSizeY(0) + pixel_size_z = ome_meta.getPixelsSizeZ(0) + channel_count = ome_meta.getPixelsSizeC(0) + timepoints_count = ome_meta.getPixelsSizeT(0) + dimension_order = ome_meta.getPixelsDimensionOrder(0) + pixel_type = ome_meta.getPixelsType(0) + + image_calibration = { + "unit_width": phys_size_x.value(), + "unit_height": phys_size_y.value(), + "unit_depth": phys_size_z.value(), + "pixel_width": pixel_size_x.getNumberValue(), + "pixel_height": pixel_size_y.getNumberValue(), + "slice_count": pixel_size_z.getNumberValue(), + "channel_count": channel_count.getNumberValue(), + "timepoints_count": timepoints_count.getNumberValue(), + "dimension_order": dimension_order, + "pixel_type": pixel_type, + } + + reader.close() + + return image_calibration + + +def get_stage_coords(source, filenames): + """Get stage coordinates and calibration for a given list of images. + + Parameters + ---------- + source : str + Path to the images. + filenames : list of str + List of images filenames. 
+ + Returns + ------- + dict + + { + dimensions : int, # number of dimensions (2D or 3D) + stage_coordinates_x : list, # absolute stage x-coordinated + stage_coordinates_y : list, # absolute stage y-coordinated + stage_coordinates_z : list, # absolute stage z-coordinated + relative_coordinates_x : list, # relative stage x-coordinates in px + relative_coordinates_y : list, # relative stage y-coordinates in px + relative_coordinates_z : list, # relative stage z-coordinates in px + image_calibration : list, # x,y,z image calibration in unit/px + calibration_unit : str, # image calibration unit + image_dimensions_czt : list, # number of images in dimensions c,z,t + series_names : list of str, # names of all series in the files + max_size : list of int, # max size (x/y/z) across all files + } + """ + + # open an array to store the abosolute stage coordinates from metadata + stage_coordinates_x = [] + stage_coordinates_y = [] + stage_coordinates_z = [] + series_names = [] + + for counter, image in enumerate(filenames): + # parse metadata + reader = ImageReader() + reader.setFlattenedResolutions(False) + omeMeta = MetadataTools.createOMEXMLMetadata() + reader.setMetadataStore(omeMeta) + reader.setId(source + str(image)) + series_count = reader.getSeriesCount() + + # get hyperstack dimensions from the first image + if counter == 0: + frame_size_x = reader.getSizeX() + frame_size_y = reader.getSizeY() + frame_size_z = reader.getSizeZ() + frame_size_c = reader.getSizeC() + frame_size_t = reader.getSizeT() + + # note the dimensions + if frame_size_z == 1: + dimensions = 2 + if frame_size_z > 1: + dimensions = 3 + + # get the physical calibration for the first image series + physSizeX = omeMeta.getPixelsPhysicalSizeX(0) + physSizeY = omeMeta.getPixelsPhysicalSizeY(0) + physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0) + + # workaround to get the z-interval if physSizeZ.value() returns None. 
+ z_interval = 1 + if physSizeZ is not None: + z_interval = physSizeZ.value() + + if frame_size_z > 1 and physSizeZ is None: + log.debug("no z calibration found, trying to recover") + first_plane = omeMeta.getPlanePositionZ(0, 0) + next_plane_imagenumber = frame_size_c + frame_size_t - 1 + second_plane = omeMeta.getPlanePositionZ(0, next_plane_imagenumber) + z_interval = abs(abs(first_plane.value()) - abs(second_plane.value())) + log.debug("z-interval seems to be: " + str(z_interval)) + + # create an image calibration + image_calibration = [ + physSizeX.value(), + physSizeY.value(), + z_interval, + ] + calibration_unit = physSizeX.unit().getSymbol() + image_dimensions_czt = [ + frame_size_c, + frame_size_z, + frame_size_t, + ] + + reader.close() + + for series in range(series_count): + if omeMeta.getImageName(series) == "macro image": + continue + + if series_count > 1 and not str(image).endswith(".vsi"): + series_names.append(omeMeta.getImageName(series)) + else: + series_names.append(str(image)) + # get the plane position in calibrated units + current_position_x = omeMeta.getPlanePositionX(series, 0) + current_position_y = omeMeta.getPlanePositionY(series, 0) + current_position_z = omeMeta.getPlanePositionZ(series, 0) + + physSizeX_max = ( + physSizeX.value() + if physSizeX.value() >= omeMeta.getPixelsPhysicalSizeX(series).value() + else omeMeta.getPixelsPhysicalSizeX(series).value() + ) + physSizeY_max = ( + physSizeY.value() + if physSizeY.value() >= omeMeta.getPixelsPhysicalSizeY(series).value() + else omeMeta.getPixelsPhysicalSizeY(series).value() + ) + if omeMeta.getPixelsPhysicalSizeZ(series): + physSizeZ_max = ( + physSizeZ.value() + if physSizeZ.value() + >= omeMeta.getPixelsPhysicalSizeZ(series).value() + else omeMeta.getPixelsPhysicalSizeZ(series).value() + ) + + else: + physSizeZ_max = 1.0 + + # get the absolute stage positions and store them + pos_x = current_position_x.value() + pos_y = current_position_y.value() + + if current_position_z is None: + 
log.debug("the z-position is missing in the ome-xml metadata.") + pos_z = 1.0 + else: + pos_z = current_position_z.value() + + stage_coordinates_x.append(pos_x) + stage_coordinates_y.append(pos_y) + stage_coordinates_z.append(pos_z) + + max_size = [physSizeX_max, physSizeY_max, physSizeZ_max] + + # calculate the store the relative stage movements in px (for the grid/collection stitcher) + relative_coordinates_x_px = [] + relative_coordinates_y_px = [] + relative_coordinates_z_px = [] + + for i in range(len(stage_coordinates_x)): + rel_pos_x = ( + stage_coordinates_x[i] - stage_coordinates_x[0] + ) / physSizeX.value() + rel_pos_y = ( + stage_coordinates_y[i] - stage_coordinates_y[0] + ) / physSizeY.value() + rel_pos_z = (stage_coordinates_z[i] - stage_coordinates_z[0]) / z_interval + + relative_coordinates_x_px.append(rel_pos_x) + relative_coordinates_y_px.append(rel_pos_y) + relative_coordinates_z_px.append(rel_pos_z) + + return { + "dimensions": dimensions, + "stage_coordinates_x": stage_coordinates_x, + "stage_coordinates_y": stage_coordinates_y, + "stage_coordinates_z": stage_coordinates_z, + "relative_coordinates_x": relative_coordinates_x_px, + "relative_coordinates_y": relative_coordinates_y_px, + "relative_coordinates_z": relative_coordinates_z_px, + "image_calibration": image_calibration, + "calibration_unit": calibration_unit, + "image_dimensions_czt": image_dimensions_czt, + "series_names": series_names, + "max_size": max_size, + } diff --git a/src/imcflibs/imagej/labelimage.py b/src/imcflibs/imagej/labelimage.py index 5d800b48..15481e14 100644 --- a/src/imcflibs/imagej/labelimage.py +++ b/src/imcflibs/imagej/labelimage.py @@ -2,7 +2,7 @@ """Functions to work with ImageJ label images.""" -from ij import IJ, ImagePlus, Prefs, ImageStack +from ij import IJ, ImagePlus, ImageStack, Prefs from ij.plugin import Duplicator, ImageCalculator from ij.plugin.filter import ThresholdToSelection from ij.process import FloatProcessor, ImageProcessor @@ -67,7 +67,7 @@ 
def label_image_to_roi_list(label_image, low_thresh=None): return roi_list, max_value -def relate_label_images(label_image_ref, label_image_to_relate): +def cookie_cut_labels(label_image_ref, label_image_to_relate): """Relate label images, giving the same label to objects belonging together. ❗ NOTE: Won't work with touching labels ❗ @@ -97,6 +97,57 @@ def relate_label_images(label_image_ref, label_image_to_relate): return ImageCalculator.run(label_image_ref, imp_dup, "Multiply create") +def relate_label_images(outer_label_imp, inner_label_imp): + """Relate label images, giving the same label to objects belonging together. + + Given two label images, this function will create a new label image with the + same labels as the reference image, but with the objects of the second image + using the 3D Association plugin from the 3DImageJSuite. + + Parameters + ---------- + outer_label_imp : ij.ImagePlus + The outer label image. + inner_label_imp : ij.ImagePlus + The inner label image. + + Returns + ------- + related_inner_imp : ij.ImagePlus + The related inner label image. + + Notes + ----- + Unlike `cookie_cut_labels`, this should work with touching labels by using + MereoTopology algorithms. + """ + + outer_label_imp.show() + inner_label_imp.show() + + outer_title = outer_label_imp.getTitle() + inner_title = inner_label_imp.getTitle() + + IJ.run( + "3D Association", + "image_a=" + + outer_title + + " " + + "image_b=" + + inner_title + + " " + + "method=Colocalisation min=1 max=0.000", + ) + + related_inner_imp = IJ.getImage() + + outer_label_imp.hide() + inner_label_imp.hide() + related_inner_imp.hide() + + return related_inner_imp + + def filter_objects(label_image, table, string, min_val, max_val): """Filter labels based on specific min and max values. 
@@ -182,11 +233,11 @@ def binary_to_label(imp, title, min_thresh=1, min_vol=None, max_vol=None):
 
     # Set the minimum size for labeling if provided
     if min_vol:
-        labeler.setMinSize(min_vol)
+        labeler.setMinSizeCalibrated(min_vol)
 
     # Set the maximum size for labeling if provided
     if max_vol:
-        labeler.setMaxSize(max_vol)
+        labeler.setMaxSizeCalibrated(max_vol)
 
     # Get the labeled image
     seg = labeler.getLabels(img)
@@ -229,17 +280,7 @@ def dilate_labels_2d(imp, dilation_radius):
         current_imp = Duplicator().run(imp, 1, 1, i, imp.getNSlices(), 1, 1)
 
         # Perform a dilation of the labels in the current slice
-        IJ.run(
-            current_imp,
-            "Label Morphological Filters",
-            "operation=Dilation radius=" + str(dilation_radius) + " from_any_label",
-        )
-
-        # Get the dilated labels
-        dilated_labels_imp = IJ.getImage()
-
-        # Hide the dilated labels to avoid visual clutter
-        dilated_labels_imp.hide()
+        dilated_labels_imp = li.dilateLabels(current_imp, dilation_radius)
 
         # Append the dilated labels to the list
         dilated_labels_list.append(dilated_labels_imp)
diff --git a/src/imcflibs/imagej/misc.py b/src/imcflibs/imagej/misc.py
index 6392c550..704abbff 100644
--- a/src/imcflibs/imagej/misc.py
+++ b/src/imcflibs/imagej/misc.py
@@ -1,16 +1,20 @@
 """Miscellaneous ImageJ related functions, mostly convenience wrappers."""
 
 import csv
+import glob
+import os
+import smtplib
+import subprocess
 import sys
 import time
-import smtplib
-import os
 
 from ij import IJ  # pylint: disable-msg=import-error
-from ij.plugin import ImageCalculator
+from ij.plugin import Duplicator, ImageCalculator, StackWriter
 
-from . import prefs
+from .. import pathtools
 from ..log import LOG as log
+from . import bioformats as bf
+from . import prefs
 
 
 def show_status(msg):
@@ -97,24 +101,31 @@ def percentage(part, whole):
     return 100 * float(part) / float(whole)
 
 
-def calculate_mean_and_stdv(float_values):
+def calculate_mean_and_stdv(values_list, round_decimals=0):
     """Calculate mean and standard deviation from a list of floats.
Parameters ---------- - float_values : list of float - List containing float numbers. + values_list : list of int,float + List containing numbers. + round_decimals : int, optional + Rounding decimal to use for the result, by default 0 Returns ------- tuple of (float, float) Mean and standard deviation of the input list. """ - mean = sum(float_values) / len(float_values) - tot = 0.0 - for x in float_values: - tot = tot + (x - mean) ** 2 - return [mean, (tot / (len(float_values))) ** 0.5] + filtered_list = [x for x in values_list if x is not None] + + if not filtered_list: + return 0, 0 + + mean = round(sum(filtered_list) / len(filtered_list), round_decimals) + variance = sum((x - mean) ** 2 for x in filtered_list) / len(filtered_list) + std_dev = round(variance ** 0.5, round_decimals) + + return mean, std_dev def find_focus(imp): @@ -420,31 +431,262 @@ def get_threshold_value_from_method(imp, method, ops): return threshold_value -def write_results(out_file, content): - """Write the results to a csv file. +def write_ordereddict_to_csv(out_file, content): + """Write data from a list of OrderedDicts to a CSV file. + + When performing measurements in an analysis that is e.g. looping over + multiple files, it's useful to keep the results in `OrderedDict` objects, + e.g. one per analyzed file / dataset. This function can be used to create a + CSV file (or append to an existing one) from a list of `OrderedDict`s. The + structure inside the dicts is entirely up to the calling code (i.e. it's not + related to ImageJ's *Results* window or such), the only requirement is + type-consistency among all the `OrderedDict`s provided to the function. Parameters ---------- out_file : str - Path to the output file. + Path to the output CSV file. content : list of OrderedDict - List of dictionaries representing the results. + List of OrderedDict objects representing the data rows to be written. + All dictionaries must have the same keys. 
+    Notes
+    -----
+    - The CSV file will use the semicolon character (`;`) as delimiter.
+    - When appending to an existing file, the column structure has to match. No
+      sanity checking is being done on this by the function!
+    - The output file is opened in binary mode for compatibility.
+
+    Examples
+    --------
+    >>> from collections import OrderedDict
+    >>> results = [
+    ...     OrderedDict([('id', 1), ('name', 'Sample A'), ('value', 42.5)]),
+    ...     OrderedDict([('id', 2), ('name', 'Sample B'), ('value', 37.2)])
+    ... ]
+    >>> write_ordereddict_to_csv('results.csv', results)
+
+    The resulting CSV file will have the following content:
+
+    id;name;value
+    1;Sample A;42.5
+    2;Sample B;37.2
     """
     # Check if the output file exists
     if not os.path.exists(out_file):
         # If the file does not exist, create it and write the header
         with open(out_file, "wb") as f:
-            dict_writer = csv.DictWriter(
-                f, content[0].keys(), delimiter=";"
-            )
+            dict_writer = csv.DictWriter(f, content[0].keys(), delimiter=";")
             dict_writer.writeheader()
             dict_writer.writerows(content)
     else:
         # If the file exists, append the results
         with open(out_file, "ab") as f:
-            dict_writer = csv.DictWriter(
-                f, content[0].keys(), delimiter=";"
-            )
+            dict_writer = csv.DictWriter(f, content[0].keys(), delimiter=";")
             dict_writer.writerows(content)
+
+
+def save_image_in_format(imp, format, out_dir, series, pad_number, split_channels):
+    """Save an ImagePlus object in the specified format.
+
+    This function provides flexible options for saving ImageJ images in various
+    formats with customizable naming conventions. It supports different
+    Bio-Formats compatible formats as well as ImageJ-native formats, and can
+    handle multi-channel images by either saving them as a single file or
+    splitting channels into separate files.
+
+    The function automatically creates necessary directories and uses consistent
+    naming patterns with series numbers. For split channels, separate
+    subdirectories are created for each channel (C1, C2, etc.).
+ + Parameters + ---------- + imp : ij.ImagePlus + ImagePlus object to save. + format : {'ImageJ-TIF', 'ICS-1', 'ICS-2', 'OME-TIFF', 'CellH5', 'BMP'} + Output format to use, see Notes section below for details. + out_dir : str + Directory path where the image(s) will be saved. + series : int + Series number to append to the filename. + pad_number : int + Number of digits to use when zero-padding the series number. + split_channels : bool + If True, split channels and save them individually in separate folders + named "C1", "C2", etc. inside out_dir. If False, save all channels in a + single file. + + Notes + ----- + Depending on the value of the `format` parameter, one of the following + output formats and saving strategies will be used: + - Bio-Formats based formats will be produced by calling `bf.export()`, note + that these formats will preserve metadata (which is **not** the case for + the other formats using different saving strategies): + - `ICS-1`: Save as ICS version 1 format (a pair of `.ics` and `.ids`). + - `ICS-2`: Save as ICS version 2 format (single `.ics` file). + - `OME-TIFF`: Save in OME-TIFF format (`.ome.tif`). + - `CellH5`: Save as CellH5 format (`.ch5`). + - `ImageJ-TIF`: Save in ImageJ TIFF format (`.tif`) using `IJ.saveAs()`. + - `BMP`: Save in BMP format using `StackWriter.save()`, producing one `.bmp` + per slice in a subfolder named after the original image. 
+ + Examples + -------- + Save a multichannel image as OME-TIFF without splitting channels: + + >>> save_image_with_extension(imp, "OME-TIFF", "/output/path", 1, 3, False) + # resulting file: /output/path/image_title_series_001.ome.tif + + Save with channel splitting: + + >>> save_image_with_extension(imp, "OME-TIFF", "/output/path", 1, 3, True) + # resulting files: /output/path/C1/image_title_series_001.ome.tif + # /output/path/C2/image_title_series_001.ome.tif + """ + + out_ext = {} + out_ext["ImageJ-TIF"] = ".tif" + out_ext["ICS-1"] = ".ids" + out_ext["ICS-2"] = ".ics" + out_ext["OME-TIFF"] = ".ome.tif" + out_ext["CellH5"] = ".ch5" + out_ext["BMP"] = ".bmp" + + imp_to_use = [] + dir_to_save = [] + + if split_channels: + for channel in range(1, imp.getNChannels() + 1): + imp_to_use.append( + Duplicator().run( + imp, + channel, + channel, + 1, + imp.getNSlices(), + 1, + imp.getNFrames(), + ) + ) + dir_to_save.append(os.path.join(out_dir, "C" + str(channel))) + else: + imp_to_use.append(imp) + dir_to_save.append(out_dir) + + for index, current_imp in enumerate(imp_to_use): + basename = imp.getShortTitle() + + out_path = os.path.join( + dir_to_save[index], + basename + "_series_" + str(series).zfill(pad_number), + ) + + if format == "ImageJ-TIF": + pathtools.create_directory(dir_to_save[index]) + IJ.saveAs(current_imp, "Tiff", out_path + ".tif") + + elif format == "BMP": + out_folder = os.path.join(out_dir, basename + os.path.sep) + pathtools.create_directory(out_folder) + StackWriter.save(current_imp, out_folder, "format=bmp") + + else: + bf.export(current_imp, out_path + out_ext[format]) + + current_imp.close() + + +def pad_number(index, pad_length=2): + """Pad a number with leading zeros to a specified length. 
+ + Parameters + ---------- + index : int or str + The number to be padded + pad_length : int, optional + The total length of the resulting string after padding, by default 2 + + Returns + ------- + str + The padded number as a string + + Examples + -------- + >>> pad_number(7) + '07' + >>> pad_number(42, 4) + '0042' + """ + return str(index).zfill(pad_length) + + +def locate_latest_imaris(paths_to_check=None): + """Find paths to latest installed Imaris or ImarisFileConverter version. + + Identify the full path to the most recent (as in "version number") + ImarisFileConverter or Imaris installation folder with the latter one having + priority. In case nothing is found, an empty string is returned. + + Parameters + ---------- + paths_to_check: list of str, optional + A list of paths that should be used to look for the installations, by default + `None` which will fall back to the standard installation locations of Bitplane. + + Returns + ------- + str + """ + if not paths_to_check: + paths_to_check = [ + r"C:\Program Files\Bitplane\ImarisFileConverter ", + r"C:\Program Files\Bitplane\Imaris ", + ] + + imaris_paths = [""] + + for check in paths_to_check: + hits = glob.glob(check + "*") + imaris_paths += sorted( + hits, + key=lambda x: float(x.replace(check, "").replace(".", "")), + ) + + return imaris_paths[-1] + + +def run_imarisconvert(file_path): + """Convert a given file to Imaris format using ImarisConvert. + + Convert the input image file to Imaris format (Imaris5) using the + ImarisConvert utility. The function uses the latest installed Imaris + application to perform the conversion via `subprocess.call()`. + + Parameters + ---------- + file_path : str + Absolute path to the input image file. 
+ """ + # in case the given file has the suffix `.ids` (meaning it is part of an + # ICS-1 `.ics`+`.ids` pair), point ImarisConvert to the `.ics` file instead: + path_root, file_extension = os.path.splitext(file_path) + if file_extension == ".ids": + file_extension = ".ics" + file_path = path_root + file_extension + + imaris_path = locate_latest_imaris() + + command = 'ImarisConvert.exe -i "%s" -of Imaris5 -o "%s"' % ( + file_path, + file_path.replace(file_extension, ".ims"), + ) + log.debug("\n%s" % command) + IJ.log("Converting to Imaris5 .ims...") + result = subprocess.call(command, shell=True, cwd=imaris_path) + if result == 0: + IJ.log("Conversion to .ims is finished.") + else: + IJ.log("Conversion failed with error code: %d" % result) diff --git a/src/imcflibs/imagej/objects3d.py b/src/imcflibs/imagej/objects3d.py index 44e1d605..a277a9c0 100644 --- a/src/imcflibs/imagej/objects3d.py +++ b/src/imcflibs/imagej/objects3d.py @@ -149,16 +149,17 @@ def get_objects_within_intensity(obj_pop, imp, min_intensity, max_intensity): # Return the new population with the filtered objects return Objects3DPopulation(objects_within_intensity) -def maxima_finder_3D(imageplus, min_threshold=0, noise=100, rxy=1.5, rz=1.5): - """ - Find local maxima in a 3D image. + + +def maxima_finder_3d(imp, min_threshold=0, noise=100, rxy=1.5, rz=1.5): + """Find local maxima in a 3D image. This function identifies local maxima in a 3D image using a specified minimum threshold and noise level. The radii for the maxima detection can be set independently for the x/y and z dimensions. Parameters ---------- - imageplus : ij.ImagePlus + imp : ij.ImagePlus The input 3D image in which to find local maxima. min_threshold : int, optional The minimum intensity threshold for maxima detection. Default is 0. @@ -175,7 +176,7 @@ def maxima_finder_3D(imageplus, min_threshold=0, noise=100, rxy=1.5, rz=1.5): An ImagePlus object containing the detected maxima as peaks. 
""" # Wrap the input ImagePlus into an ImageHandler - img = ImageHandler.wrap(imageplus) + img = ImageHandler.wrap(imp) # Duplicate the image and apply a threshold cut-off thresholded = img.duplicate() @@ -194,7 +195,7 @@ def maxima_finder_3D(imageplus, min_threshold=0, noise=100, rxy=1.5, rz=1.5): imp_peaks = img_peaks.getImagePlus() # Set the calibration of the peaks image to match the input image - imp_peaks.setCalibration(imageplus.getCalibration()) + imp_peaks.setCalibration(imp.getCalibration()) # Set the title of the peaks image imp_peaks.setTitle("Peaks") @@ -203,8 +204,7 @@ def maxima_finder_3D(imageplus, min_threshold=0, noise=100, rxy=1.5, rz=1.5): def seeded_watershed(imp_binary, imp_peaks, threshold=10): - """ - Perform a seeded watershed segmentation on a binary image using seed points. + """Perform a seeded watershed segmentation on a binary image using seed points. This function applies a watershed segmentation to a binary image using seed points provided in another image. An optional threshold can be specified to control the segmentation process. 
diff --git a/src/imcflibs/imagej/omerotools.py b/src/imcflibs/imagej/omerotools.py index 6fdfb7ef..2e2c4325 100644 --- a/src/imcflibs/imagej/omerotools.py +++ b/src/imcflibs/imagej/omerotools.py @@ -8,11 +8,17 @@ [simple-omero-client]: https://github.com/GReD-Clermont/simple-omero-client """ -from java.lang import Long - - from fr.igred.omero import Client -from fr.igred.omero.annotations import MapAnnotationWrapper +from fr.igred.omero.annotations import ( + MapAnnotationWrapper, + TableWrapper, +) +from fr.igred.omero.roi import ROIWrapper +from java.lang import Long +from java.text import SimpleDateFormat +from java.util import ArrayList +from omero.cmd import OriginalMetadataRequest +from omero.gateway.model import TableData, TableDataColumn def parse_url(client, omero_str): @@ -187,6 +193,21 @@ def add_annotation(client, repository_wpr, annotations, header): repository_wpr.addMapAnnotation(client, map_annotation_wpr) +def delete_keyvalue_annotations(user_client, object_wrapper): + """Delete annotations linked to object. + + Parameters + ---------- + user_client : fr.igred.omero.Client + Client used for login to OMERO. + object_wrapper : fr.igred.omero.repositor.GenericRepositoryObjectWrapper + Wrapper to the object for the anotation. + + """ + kv_pairs = object_wrapper.getMapAnnotations(user_client) + user_client.delete(kv_pairs) + + def find_dataset(client, dataset_id): """Retrieve a dataset (wrapper) from the OMERO server. @@ -204,3 +225,193 @@ def find_dataset(client, dataset_id): """ # Fetch the dataset from the OMERO server using the provided dataset ID return client.getDataset(Long(dataset_id)) + + def get_acquisition_metadata(user_client, image_wpr): + """Get acquisition metadata from OMERO based on an image ID. 
+ + Parameters + ---------- + user_client : fr.igred.omero.Client + Client used for login to OMERO + image_wpr : fr.igred.omero.repositor.ImageWrapper + Wrapper to the image for the metadata + + Returns + ------- + dict + + { + objective_magnification : float, + objective_na : float, + acquisition_date : str, + acquisition_date_number : str, + } + """ + ctx = user_client.getCtx() + instrument_data = ( + user_client.getGateway() + .getMetadataService(ctx) + .loadInstrument(image_wpr.asDataObject().getInstrumentId()) + ) + objective_data = instrument_data.copyObjective().get(0) + metadata = {} + + metadata["objective_magnification"] = ( + objective_data.getNominalMagnification().getValue() + if objective_data.getNominalMagnification() is not None + else 0 + ) + metadata["objective_na"] = ( + objective_data.getLensNA().getValue() + if objective_data.getLensNA() is not None + else 0 + ) + + if image_wpr.getAcquisitionDate() is None: + if image_wpr.asDataObject().getFormat() == "ZeissCZI": + field = "Information|Document|CreationDate" + date_field = get_info_from_original_metadata( + user_client, image_wpr, field + ) + metadata["acquisition_date"] = date_field.split("T")[0] + metadata["acquisition_date_number"] = int( + metadata["acquisition_date"].replace("-", "") + ) + else: + metadata["acquisition_date"] = "NA" + metadata["acquisition_date_number"] = 0 + else: + sdf = SimpleDateFormat("yyyy-MM-dd") + metadata["acquisition_date"] = sdf.format(image_wpr.getAcquisitionDate()) + metadata["acquisition_date_number"] = int( + metadata["acquisition_date"].replace("-", "") + ) + + return metadata + + +def get_info_from_original_metadata(user_client, image_wpr, field): + """Retrieve information from the original metadata (as opposed to OME-MD). + + In some cases not all information is parsed correctly by BF and has to be + recovered / identified directly from the *original* metadata. This function + extracts the corresponding value based on the field identifier. 
+ + Parameters + ---------- + user_client : fr.igred.omero.Client + Client used for login to OMERO + image_wpr : fr.igred.omero.repositor.ImageWrapper + Wrapper to the image + field : str + Field to look for in the original metadata. Needs to be found beforehand. + + Returns + ------- + str + Value of the field + """ + omr = OriginalMetadataRequest(Long(image_wpr.getId())) + cmd = user_client.getGateway().submit(user_client.getCtx(), omr) + rsp = cmd.loop(5, 500) + gm = rsp.globalMetadata + return gm.get(field).getValue() + + +def create_table_columns(headings): + """Create OMERO table headings from an ImageJ results table. + + Parameters + ---------- + headings : list(str) + List of columns names. + + Returns + ------- + list(omero.gateway.model.TableDataColumn) + List of columns formatted to be uploaded to OMERO. + """ + table_columns = [] + # populate the headings + for h in range(len(headings)): + heading = headings.keys()[h] + type = headings.values()[h] + # OMERO.tables queries don't handle whitespace well + heading = heading.replace(" ", "_") + # title_heading = ["Slice", "Label"] + table_columns.append(TableDataColumn(heading, h, type)) + # table_columns.append(TableDataColumn("Image", size, ImageData)) + return table_columns + + +def upload_array_as_omero_table(user_client, table_title, data, columns, image_wpr): + """Upload a table to OMERO from a list of lists. + + Parameters + ---------- + user_client : fr.igred.omero.Client + Client used for login to OMERO + table_title : str + Title of the table to be uploaded. + data : list(list()) + List of lists of results to upload + columns : list(str) + List of columns names + image_wpr : fr.igred.omero.repositor.ImageWrapper + Wrapper to the image to be uploaded + + Examples + -------- + >>> from fr.igred.omero import Client + >>> from java.lang import String, Double, Long + ... + >>> client = Client() # connect to OMERO + >>> client.connect("omero.example.org", 4064, "username", "password") + ... 
+ >>> image_wpr = client.getImage(Long(123456)) # get an image + ... + >>> columns = { # prepare column definitions (name-type pairs) + ... "Row_ID": Long, + ... "Cell_Area": Double, + ... "Cell_Type": String, + ... } + ... + >>> data = [ # prepare data (list of rows, each row is a list of values) + ... [1, 250.5, "Neuron"], + ... [2, 180.2, "Astrocyte"], + ... [3, 310.7, "Neuron"], + ... ] + ... + >>> upload_array_as_omero_table( + ... client, "Cell Measurements", data, columns, image_wpr + ... ) + """ + + dataset_wpr = image_wpr.getDatasets(user_client)[0] + + table_columns = create_table_columns(columns) + table_data = TableData(table_columns, data) + table_wpr = TableWrapper(table_data) + table_wpr.setName(table_title) + dataset_wpr.addTable(user_client, table_wpr) + + +def save_rois_to_omero(user_client, image_wpr, rm): + """Save ROIs to OMERO linked to the image. + + Parameters + ---------- + user_client : fr.igred.omero.Client + Client used for login to OMERO + image_wpr : fr.igred.omero.repositor.ImageWrapper + Wrapper to the image for the ROIs + rm : ij.plugin.frame.RoiManager + ROI Manager containing the ROIs + + """ + rois_list = rm.getRoisAsArray() + rois_arraylist = ArrayList(len(rois_list)) + for roi in rois_list: + rois_arraylist.add(roi) + rois_to_upload = ROIWrapper.fromImageJ(rois_arraylist) + image_wpr.saveROIs(user_client, rois_to_upload) diff --git a/src/imcflibs/imagej/processing.py b/src/imcflibs/imagej/processing.py new file mode 100644 index 00000000..c50e9c5f --- /dev/null +++ b/src/imcflibs/imagej/processing.py @@ -0,0 +1,136 @@ +"""ImageJ processing utilities for filtering and thresholding images. + +This module provides functions to apply various image processing operations +using ImageJ, including filters, background subtraction, and thresholding. 
+""" + +from ij import IJ + +from ..log import LOG as log + + +def apply_filter(imp, filter_method, filter_radius, do_3d=False): + """Make a specific filter followed by a threshold method of choice. + + Parameters + ---------- + imp : ImagePlus + Input ImagePlus to filter and threshold + filter_method : str + Name of the filter method to use. Must be one of: + - Median + - Mean + - Gaussian Blur + - Minimum + - Maximum + filter_radius : int + Radius of the filter filter to use + do_3d : bool, optional + If set to True, will do a 3D filtering, by default False + + + Returns + ------- + ij.ImagePlus + Filtered ImagePlus + """ + log.info("Applying filter %s with radius %d" % (filter_method, filter_radius)) + + if filter_method not in [ + "Median", + "Mean", + "Gaussian Blur", + "Minimum", + "Maximum", + ]: + raise ValueError( + "filter_method must be one of: Median, Mean, Gaussian Blur, Minimum, Maximum" + ) + + if do_3d: + filter = filter_method + " 3D..." + else: + filter = filter_method + "..." + + options = ( + "sigma=" + if filter_method == "Gaussian Blur" + else "radius=" + str(filter_radius) + " stack" + ) + + log.debug("Filter: <%s> with options <%s>" % (filter, options)) + + imageplus = imp.duplicate() + IJ.run(imageplus, filter, options) + + return imageplus + + +def apply_background_subtraction(imp, rolling_ball_radius, do_3d=False): + """Perform background subtraction using a rolling ball method. 
+
+    Parameters
+    ----------
+    imp : ij.ImagePlus
+        Input ImagePlus to subtract the background from.
+    rolling_ball_radius : int
+        Radius of the rolling ball filter to use
+    do_3d : bool, optional
+        If set to True, will process the full stack, by default False
+
+    Returns
+    -------
+    ij.ImagePlus
+        Background-subtracted copy of the input.
+    """
+    log.info("Applying rolling ball with radius %d" % rolling_ball_radius)
+
+    # FIX: the radius was previously dropped entirely in the 2D case (the
+    # options evaluated to ""); only the "stack" flag depends on `do_3d`
+    options = "rolling=" + str(rolling_ball_radius) + (" stack" if do_3d else "")
+
+    log.debug("Background subtraction options: %s" % options)
+
+    imageplus = imp.duplicate()
+    # FIX: the command is spelled "Subtract Background..." in ImageJ;
+    # "Substract Background..." is not a known command
+    IJ.run(imageplus, "Subtract Background...", options)
+
+    return imageplus
+
+
+def apply_threshold(imp, threshold_method, do_3d=True):
+    """Apply an automatic threshold method to the input ImagePlus.
+
+    Parameters
+    ----------
+    imp : ij.ImagePlus
+        Input ImagePlus to threshold.
+    threshold_method : str
+        Name of the threshold method to use
+    do_3d : bool, optional
+        If set to True, the automatic threshold will be done on a 3D stack,
+        by default True
+
+    Returns
+    -------
+    ij.ImagePlus
+        Thresholded (binary mask) copy of the input.
+    """
+    log.info("Applying threshold method %s" % threshold_method)
+
+    imageplus = imp.duplicate()
+
+    # FIX: `do_3D` raised a NameError (the parameter is `do_3d`), and the
+    # method name was dropped entirely in the 2D case (options evaluated to
+    # ""); only the "stack" flag depends on `do_3d`
+    auto_threshold_options = threshold_method + " dark" + (" stack" if do_3d else "")
+
+    log.debug("Auto threshold options: %s" % auto_threshold_options)
+
+    IJ.setAutoThreshold(imageplus, auto_threshold_options)
+
+    convert_to_binary_options = "method=" + threshold_method + " background=Dark black"
+
+    log.debug("Convert to binary options: %s" % convert_to_binary_options)
+
+    IJ.run(imageplus, "Convert to Mask", convert_to_binary_options)
+
+    return imageplus
diff --git a/src/imcflibs/imagej/shading.py b/src/imcflibs/imagej/shading.py
index 02a11af1..1e3a62fc 100644
--- a/src/imcflibs/imagej/shading.py
+++ b/src/imcflibs/imagej/shading.py
@@ -182,6 +182,7 @@ def process_files(files, outpath, model_file, fmt):
         if model:
             model.close()
 
+
 def simple_flatfield_correction(imp,
sigma=20.0): """Perform a simple flatfield correction to a given ImagePlus stack. @@ -205,10 +206,7 @@ def simple_flatfield_correction(imp, sigma=20.0): # Normalize image to the highest value of original (requires 32-bit image) IJ.run(flatfield, "32-bit", "") - IJ.run( - flatfield, - "Divide...", - "value=" + str(stats.max)) + IJ.run(flatfield, "Divide...", "value=" + str(stats.max)) ic = ImageCalculator() flatfield_corrected = ic.run("Divide create", imp, flatfield) diff --git a/src/imcflibs/pathtools.py b/src/imcflibs/pathtools.py index 40977b22..166a5186 100644 --- a/src/imcflibs/pathtools.py +++ b/src/imcflibs/pathtools.py @@ -2,8 +2,8 @@ import os.path import platform -from os import sep import re +from os import sep from . import strtools from .log import LOG as log @@ -38,6 +38,7 @@ def parse_path(path, prefix=""): dict The parsed (and possibly combined) path split into its components, with the following keys: + - `orig` : The full string as passed into this function (possibly combined with the prefix in case one was specified). - `full` : The same as `orig` with separators adjusted to the current @@ -357,6 +358,22 @@ def folder_size(source): return total_size +def create_directory(new_path): + """Create a new directory at the specified path. + + This is a workaround for Python 2.7 where `os.makedirs()` is lacking + the `exist_ok` parameter that is present in Python 3.2 and newer. + + Parameters + ---------- + new_path : str + Path where the new directory should be created. + """ + + if not os.path.exists(new_path): + os.makedirs(new_path) + + # pylint: disable-msg=C0103 # we use the variable name 'exists' in its common spelling (lowercase), so # removing this workaround will be straightforward at a later point