
Commit d93e594

Add srm and srg tests and remove extra commenting in repo
BC-Chang committed Nov 21, 2024
1 parent 60da34e commit d93e594
Showing 26 changed files with 344 additions and 235 deletions.
6 changes: 3 additions & 3 deletions dpm_tools/io/__init__.py
@@ -24,8 +24,8 @@
"""

from ._io_utils import find_files_with_ext, get_tiff_metadata, natural_sort, combine_slices, convert_filetype
from .io_utils import find_files_with_ext, get_tiff_metadata, natural_sort, combine_slices, convert_filetype

from ._read_data import read_image, Image
from .read_data import read_image, Image

from ._write_data import write_image
from .write_data import write_image
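
Because dpm_tools/io/__init__.py re-exports the same names from the renamed modules, code that imports through the package is unaffected by dropping the leading underscores; only the intra-package import paths change. A minimal sketch of such downstream usage (assumes an installed dpm_tools; not part of this commit):

    # These imports resolve the same way before and after the rename,
    # since the public names are re-exported at the package level.
    from dpm_tools.io import read_image, write_image, find_files_with_ext, convert_filetype
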
4 changes: 2 additions & 2 deletions dpm_tools/io/_io_utils.py → dpm_tools/io/io_utils.py
@@ -6,8 +6,8 @@
from typing import Any, Tuple
import re

from ._read_data import read_image
from ._write_data import write_image
from .read_data import read_image
from .write_data import write_image


def find_files_with_ext(directory: pathlib.Path, extension: str) -> list:
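
The only signature visible in this hunk is find_files_with_ext(directory: pathlib.Path, extension: str) -> list. A hypothetical call based solely on that signature (whether the extension should carry a leading dot is not shown here, so that detail is an assumption):

    import pathlib
    from dpm_tools.io import find_files_with_ext

    # Collect TIFF files from a data directory; ".tif" vs "tif" is assumed.
    tiff_files = find_files_with_ext(pathlib.Path("./data"), extension=".tif")
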
63 changes: 0 additions & 63 deletions dpm_tools/io/_read_data.py → dpm_tools/io/read_data.py
@@ -293,66 +293,3 @@ def _read_data_from_files(self):
self.vector = images[1:]


# @dataclass(kw_only=True)
# class ImageFromFile(Image):
# """
# ImageFromFile dataclass inherited from the Image dataclass. This dataclass allows users to pass the path to an image file
#
# At least one of scalar or vector attributes must be provided.
#
# Attributes:
# basepath (pathlib.Path): Path to the parent directory holding the data file
# filename (str): Name of the data file
# meta (dict): A dictionary of the necessary metadata to load the image
# filepath: Joint file path derived from basename and filename (i.e. basename / filename)
# image: The 3D image (e.g. binary image, pressure field, etc.)
# vector (list[np.ndarray, np.ndarray, np.ndarray]): A list containing 3 np.ndarrays of the vector components
# shape (tuple): The shape of the 3D ndarray containing the image
# nx (int): Width of the image (in voxels)
# ny (int): Height of the image (in voxels)
# nz (int): Number of slices of the image
# magnitude: Magnitude of vector (if vector is provided)
# """
# basepath: pathlib.Path
# filename: str
# meta: field(default_factory=dict) = None
# filepath: str = field(init=False)
# image: np.ndarray = field(init=False)
#
# def __post_init__(self):
# self.filepath = os.path.join(self.basepath, self.filename)
# self.basename, self.ext = self.filename.rsplit('.', 1)
# self.image = read_image(self.filepath, meta=self.meta)
#
# # Add 3rd axis if image is 2D
# if self.image.ndim == 2:
# self.image = self.image[np.newaxis, :, :]
#
# self.nz, self.nx, self.ny = self.image.shape
#
# # TODO add multiple fields (ex. velocity field)
# # TODO add functionality for coordinate data


# TODO combine VectorImage and Image classes
# @dataclass
# class Vector(Image):
# scalar: np.ndarray = None
# vector: list = None
#
# def __post_init__(self):
# assert self.scalar is not None or self.vector is not None, "Provide either scalar or vector data"
#
# if self.scalar.ndim == 2:
# self.scalar = self.scalar[np.newaxis, :, :]
#
# self.nz, self.nx, self.ny = self.scalar.shape
#
#
# self.magnitude = np.sqrt(self.vector[0]**2 + self.vector[1]**2 + self.vector[2]**2)

if __name__ == "__main__":
scalar_path = pathlib.Path('C:/Users/bcc2459/Documents/dpm_tools/data/3_fractures.tif')
img = Image(filepaths=[scalar_path, scalar_path, scalar_path, scalar_path], meta=[{}, {}, {}, {}])
print(img.scalar)

File renamed without changes.
6 changes: 3 additions & 3 deletions dpm_tools/metrics/__init__.py
@@ -25,11 +25,11 @@
minkowski_map
"""

from ._maps import slicewise_edt, slicewise_mis, edt, sdt, mis, chords, time_of_flight, constriction_factor, minkowski_map
from .maps import slicewise_edt, slicewise_mis, edt, sdt, mis, chords, time_of_flight, constriction_factor, minkowski_map

from ._feature_utils import _morph_drain_config, _set_linear_trend
from .feature_utils import _morph_drain_config, _set_linear_trend

from ._scalars import minkowski_functionals, morph_drain, heterogeneity_curve
from .scalars import minkowski_functionals, morph_drain, heterogeneity_curve

# from .binary_configs import *

6 changes: 0 additions & 6 deletions dpm_tools/metrics/_minkowski_utils.py
@@ -40,17 +40,11 @@ def initialize_mapping(n_dim):

@jit('u1[:, :](u1[:, :], u8, u8)', nopython=True, parallel=False)
def get_binary_configs_2d(image, dim0, dim1):
# n_threads = numba.get_num_threads()
# chunk_size = n_threads * 8
# n_chunks = (dim0 // (chunk_size)) + 1

mask = np.zeros((dim0 - 1, dim1 - 1), dtype=np.uint8)

IC = initialize_mapping(2)

# for chunk in prange(n_chunks):
# start_x = chunk * chunk_size
# end_x = min(start_x + chunk_size + 2, dim0)
for x in range(dim0 - 1):
for y in range(dim1 - 1):
mask_val = (int(image[x, y] == 1) +
File renamed without changes.
4 changes: 1 addition & 3 deletions dpm_tools/metrics/_maps.py → dpm_tools/metrics/maps.py
@@ -3,7 +3,7 @@
from edt import edt as edist

from ._minkowski_coeff import *
from ._feature_utils import pad_to_size, create_kernel, _centered
from .feature_utils import pad_to_size, create_kernel, _centered
from ._fft_backends import _get_backend
from ._minkowski_utils import *

@@ -95,7 +95,6 @@ def slicewise_mis(image, **kwargs) -> np.ndarray:
Returns:
numpy.ndarray: Maximum inscribed sphere computed on each slice (maximum inscribed disk)
"""
# ? why do we pad this?
input_image = np.pad(array=image.copy(), pad_width=((0, 0), (0, 0), (0, 1)), constant_values=1)

# Calculate slice-wise local thickness from PoreSpy
@@ -122,7 +121,6 @@ def chords(image) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
sz_y = np.zeros_like(ellipse_area)
for i in range(image.shape[0]):
# Calculate the chords in x and y for each slice in z
# chords = ps.filters.apply_chords_3D(image)
chords_x = ps.filters.apply_chords(im=image[i, :, :], spacing=0, trim_edges=False, axis=0)
chords_y = ps.filters.apply_chords(im=image[i, :, :], spacing=0, trim_edges=False, axis=1)

@@ -2,7 +2,7 @@
from typing import Tuple
import edt
from ._minkowski_coeff import contributions_2d, contributions_3d
from ._feature_utils import _morph_drain_config, _get_heterogeneity_centers_3d
from .feature_utils import _morph_drain_config, _get_heterogeneity_centers_3d
from ._minkowski_utils import get_configs_histogram_2d, get_configs_histogram_3d


@@ -61,8 +61,6 @@ def _minkowski_3d(image: np.ndarray) -> Tuple[float, float, float, float]:

# Get the isotropic configurations (3D)
nx, ny, nz = image.shape
# bin_img = np.transpose(binary_image, (2, 1, 0))
# bin_img = np.transpose(binary_image, (1, 2, 0))
configs_hist = get_configs_histogram_3d(image, nx, ny, nz)
v3 = np.sum(contributions_3d["v3"] / 8. * configs_hist)
v2 = np.sum(contributions_3d["v2"] / 24. * 4 * configs_hist)
25 changes: 14 additions & 11 deletions dpm_tools/segmentation/segment.py
@@ -26,7 +26,7 @@ def statistical_region_merging(image: np.ndarray, Q: float = 5.0, normalize: boo
numpy.ndarray: A numpy.ndarray labeled image of the same shape and datatype as the input image.
"""
# Get image dimensions
image_dims = str(image.ndims)
image_dims = str(image.ndim)
assert image_dims in ["2", "3"], "Image must be either 2 or 3 dimensional ndarray"

# Get image datatype
@@ -40,6 +40,7 @@
image[image < img_min] = img_min
image[image > img_max] = img_max
image = (image - img_min) / (img_max - img_min) * np.iinfo(image.dtype).max
image = image.astype(image_dtype)

func_dict = {"2":
{
@@ -55,21 +56,22 @@
}
}

srg_obj = func_dict[image_dims][image_dtype](image, Q)
srg_obj.segment()

return srg_obj.get_result()
srm_obj = func_dict[image_dims][image_dtype](image, Q)
srm_obj.segment()
segmentation = srm_obj.get_result()

return segmentation


def seeded_region_growing(image: np.ndarray, seed_image: np.ndarray, normalize: bool = True)
def seeded_region_growing(image: np.ndarray, seed_image: np.ndarray, normalize: bool = True):
""" Perform seeded region growing on a gray level image using predefined seeds
"""
# TODO: Write wrapper code for dpm_srg
image_dims = image.ndims
assert image_dims in [2, 3], "Image must be either 2 or 3 dimensional ndarray"
image_dims = str(image.ndim)
assert image_dims in ["2", "3"], "Image must be either 2 or 3 dimensional ndarray"

seed_dims = seed_image.ndims
assert seed_dims == image_dims, "Seed image must have the same dimensions as the input image"
seed_dims = seed_image.ndim
assert seed_dims == int(image_dims), "Seed image must have the same dimensions as the input image"

# Get image datatype
image_dtype = str(image.dtype)
@@ -83,6 +85,7 @@ def seeded_region_growing(image: np.ndarray, seed_image: np.ndarray, normalize:
image[image < img_min] = img_min
image[image > img_max] = img_max
image = (image - img_min) / (img_max - img_min) * np.iinfo(image.dtype).max
image = image.astype(image_dtype)

func_dict = {"2":
{
@@ -98,7 +101,7 @@ def seeded_region_growing(image: np.ndarray, seed_image: np.ndarray, normalize:
}
}

srg_obj = func_dict[image_dims][image_dtype](image, Q)
srg_obj = func_dict[image_dims][image_dtype](image, seed_image)
srg_obj.segment()

return srg_obj.get_result()
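
Taken together, the changes above correct ndims to ndim, add the missing colon on the seeded_region_growing signature, cast the normalized image back to its original dtype after the float-valued normalization, and pass seed_image (rather than Q) to the seeded-region-growing backend. A minimal usage sketch of the two functions as they stand after this commit; the import path, the synthetic arrays, and the seed-label convention (integer labels on an otherwise zero array) are assumptions rather than details taken from the diff:

    import numpy as np
    from dpm_tools.segmentation.segment import statistical_region_merging, seeded_region_growing

    # Synthetic 3D gray-level image; uint8 is assumed to be one of the dtype keys in func_dict.
    gray = (np.random.rand(64, 64, 64) * 255).astype(np.uint8)

    # Statistical region merging: Q controls how readily neighboring regions merge.
    labels_srm = statistical_region_merging(gray, Q=5.0, normalize=True)

    # Seeded region growing: the seed image must match the input's dimensionality.
    seeds = np.zeros_like(gray)      # hypothetical seed layout
    seeds[2, 2, 2] = 1               # seed for one phase
    seeds[32, 32, 32] = 2            # seed for another phase
    labels_srg = seeded_region_growing(gray, seeds, normalize=True)
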
3 changes: 1 addition & 2 deletions dpm_tools/visualization/_3d_vis_utils.py
@@ -50,12 +50,11 @@ def _custom_cmap(vector, color_map: str = 'turbo') -> Tuple[ListedColormap, floa

min_magnitude = np.percentile(log_mag, 25)
max_magnitude = np.percentile(log_mag, 99)
# print(f'Log min. = {min_magnitude}, Log max. = {max_magnitude}')

cmap_modified = cm.get_cmap(color_map, 65535)
spacing = lambda x: np.log10(x)
new_cmap = ListedColormap(cmap_modified(spacing(np.linspace(1, 10, 65535))))
# return min_magnitude, max_magnitude

return new_cmap, 10 ** min_magnitude, 10 ** max_magnitude


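
For context, _custom_cmap builds its colormap by sampling an existing colormap at log10-spaced positions, so that equal steps among small magnitudes produce larger color changes than the same steps among large magnitudes, mimicking a logarithmic color scale. A standalone sketch of the same idea (not from the repo; it uses the matplotlib.colormaps registry instead of the older cm.get_cmap call seen above):

    import numpy as np
    from matplotlib import colormaps
    from matplotlib.colors import ListedColormap

    base = colormaps["turbo"]
    # log10 of uniformly spaced values in [1, 10] covers [0, 1] non-uniformly,
    # giving low data values a larger share of the base colormap's range.
    positions = np.log10(np.linspace(1, 10, 256))
    log_turbo = ListedColormap(base(positions))
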
4 changes: 2 additions & 2 deletions dpm_tools/visualization/__init__.py
@@ -25,9 +25,9 @@
plot_medial_axis
"""

from ._plot_2d import hist, plot_slice, make_thumbnail, make_gif, plot_heterogeneity_curve
from .plot_2d import hist, plot_slice, make_thumbnail, make_gif, plot_heterogeneity_curve

from ._plot_3d import orthogonal_slices, plot_isosurface, bounding_box, plot_glyph, plot_streamlines, plot_scalar_volume, plot_medial_axis
from .plot_3d import orthogonal_slices, plot_isosurface, bounding_box, plot_glyph, plot_streamlines, plot_scalar_volume, plot_medial_axis

# from ._vis_utils import *

2 changes: 0 additions & 2 deletions dpm_tools/visualization/_vis_utils.py
@@ -48,8 +48,6 @@ def _scale_image(image_data: np.ndarray, scale_to: type = np.uint8) -> np.ndarra
:rtype: np.ndarray
"""

# assert image_data.dtype.type is not scale_to, f"Image data is already of type {scale_to.__name__}"

if 'int' in scale_to.__name__:
dtype_info = np.iinfo(scale_to)

@@ -6,7 +6,7 @@
from typing import Any

from ._vis_utils import _make_dir, _write_hist_csv, _scale_image
from ..metrics._feature_utils import _sigmoid
from ..metrics.feature_utils import _sigmoid


# TODO Add fig save decorator
@@ -65,8 +65,8 @@ def hist(data,
# save_path = f'{_make_dir("./figures")}'

# TODO add write_csv with proper savepath
if write_csv:
_write_hist_csv(freq, bins, './figures/histogram_csv.csv')
# if write_csv:
# _write_hist_csv(freq, bins, './figures/histogram_csv.csv')

# TODO add savefig?
