Skip to content
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -116,3 +116,4 @@ clean/
.pytest_cache/
__pycache__/
mypy_cache/
.claude/
32 changes: 17 additions & 15 deletions src/cellmap_data/image_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,35 +38,35 @@ def __init__(
self.base_path = str(path)
self.path = (UPath(path) / f"s{scale_level}").path
self.label_class = self.target_class = target_class
if len(write_voxel_shape) == len(axis_order) + 1 and "c" not in axis_order:
# Add channel axis if missing
axis_order = "c" + axis_order
if isinstance(scale, Sequence):
if len(axis_order) > len(scale):
scale = [scale[0]] * (len(axis_order) - len(scale)) + list(scale)
scale = {c: s for c, s in zip(axis_order, scale)}
scale = {c: s for c, s in zip(axis_order[::-1], scale[::-1])}
self.scale = scale
if isinstance(write_voxel_shape, Sequence):
if len(axis_order) > len(write_voxel_shape): # TODO: This might be a bug
write_voxel_shape = [1] * (
len(axis_order) - len(write_voxel_shape)
) + list(write_voxel_shape)
elif (
len(axis_order) + 1 == len(write_voxel_shape) and "c" not in axis_order
):
axis_order = "c" + axis_order
write_voxel_shape = {c: t for c, t in zip(axis_order, write_voxel_shape)}
self.scale = scale
self.axes = axis_order
# Assume axes correspond to last dimensions of voxel shape
self.spatial_axes = axis_order[-len(scale) :]
self.bounding_box = bounding_box
self.write_voxel_shape = write_voxel_shape
self.write_world_shape = {
c: write_voxel_shape[c] * scale[c] for c in axis_order
c: write_voxel_shape[c] * scale[c] for c in self.spatial_axes
}
self.axes = axis_order[: len(write_voxel_shape)]
self.scale_level = scale_level
self.context = context
self.overwrite = overwrite
self.dtype = dtype
self.fill_value = fill_value
dims = [c for c in axis_order]
self.metadata = {
"offset": list(self.offset.values()),
"axes": dims,
"axes": [c for c in axis_order],
"voxel_size": list(self.scale.values()),
"shape": list(self.shape.values()),
"units": "nanometer",
Expand Down Expand Up @@ -170,7 +170,8 @@ def world_shape(self) -> Mapping[str, float]:
return self._world_shape
except AttributeError:
self._world_shape = {
c: self.bounding_box[c][1] - self.bounding_box[c][0] for c in self.axes
c: self.bounding_box[c][1] - self.bounding_box[c][0]
for c in self.spatial_axes
}
return self._world_shape

Expand All @@ -180,7 +181,8 @@ def shape(self) -> Mapping[str, int]:
return self._shape
except AttributeError:
self._shape = {
c: int(np.ceil(self.world_shape[c] / self.scale[c])) for c in self.axes
c: int(np.ceil(self.world_shape[c] / self.scale[c]))
for c in self.spatial_axes
}
return self._shape

Expand All @@ -199,7 +201,7 @@ def offset(self) -> Mapping[str, float]:
try:
return self._offset
except AttributeError:
self._offset = {c: self.bounding_box[c][0] for c in self.axes}
self._offset = {c: self.bounding_box[c][0] for c in self.spatial_axes}
return self._offset

@property
Expand Down Expand Up @@ -228,7 +230,7 @@ def align_coords(
self, coords: Mapping[str, tuple[Sequence, np.ndarray]]
) -> Mapping[str, tuple[Sequence, np.ndarray]]:
aligned_coords = {}
for c in self.axes:
for c in self.spatial_axes:
aligned_coords[c] = np.array(
self.array.coords[c][
np.abs(np.array(self.array.coords[c])[:, None] - coords[c]).argmin(
Expand Down
238 changes: 123 additions & 115 deletions src/cellmap_data/utils/view.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,15 +8,8 @@
import webbrowser
from multiprocessing.pool import ThreadPool

import neuroglancer
import numpy as np
import s3fs
import zarr
from IPython.core.getipython import get_ipython
from IPython.display import IFrame, display
from tensorstore import d as ts_d
from tensorstore import open as ts_open
from upath import UPath

logger = logging.getLogger(__name__)

Expand All @@ -30,6 +23,8 @@

def get_multiscale_voxel_sizes(path: str):
if "s3://" in path:
import s3fs

# Use s3fs to read the zarr metadata
fs = s3fs.S3FileSystem(anon=True)
store = s3fs.S3Map(
Expand Down Expand Up @@ -114,6 +109,10 @@ def open_neuroglancer(metadata):

Returns the Neuroglancer.Viewer object.
"""
import neuroglancer
from IPython.core.getipython import get_ipython
from IPython.display import IFrame, display

# 1) bind to localhost on a random port
neuroglancer.set_server_bind_address("localhost", 0)
viewer = neuroglancer.Viewer()
Expand Down Expand Up @@ -178,7 +177,7 @@ def get_layer(
data_path: str,
layer_type: str = "image",
multiscale: bool = True,
) -> neuroglancer.Layer:
):
"""
Get a Neuroglancer layer from a zarr data path for a LocalVolume.

Expand All @@ -196,6 +195,9 @@ def get_layer(
neuroglancer.Layer
The Neuroglancer layer.
"""
import neuroglancer
from upath import UPath

# Construct an xarray with Tensorstore backend
# Get metadata
if multiscale:
Expand All @@ -217,6 +219,116 @@ def get_layer(
voxel_offset=metadata[scale]["voxel_offset"],
)
)

class ScalePyramid(neuroglancer.LocalVolume):
    """A neuroglancer layer that provides volume data on different scales.

    Mimics a ``LocalVolume``.
    From https://github.com/funkelab/funlib.show.neuroglancer/blob/master/funlib/show/neuroglancer/scale_pyramid.py

    Args:
    ----
    volume_layers (``list`` of ``LocalVolume``):

        One ``LocalVolume`` per provided resolution.
    """

    def __init__(self, volume_layers):
        # NOTE: deliberately skip LocalVolume.__init__ (this object wraps
        # several LocalVolumes rather than a single data array) and
        # initialize the next base class in the MRO instead.
        super(neuroglancer.LocalVolume, self).__init__()

        logger.debug("Creating scale pyramid...")

        # Finest (smallest) and coarsest (largest) voxel sizes provided.
        self.min_voxel_size = min(
            tuple(layer.dimensions.scales) for layer in volume_layers
        )
        self.max_voxel_size = max(
            tuple(layer.dimensions.scales) for layer in volume_layers
        )

        self.dims = len(volume_layers[0].dimensions.scales)

        # Index each layer by its integer downsampling factors relative to
        # the finest scale, e.g. (1, 1, 1) for the full-resolution layer.
        self.volume_layers = {
            tuple(
                int(x)
                for x in map(
                    operator.truediv,
                    layer.dimensions.scales,
                    self.min_voxel_size,
                )
            ): layer
            for layer in volume_layers
        }

        logger.debug("min_voxel_size: %s", self.min_voxel_size)
        logger.debug("scale keys: %s", self.volume_layers.keys())
        logger.debug(self.info())

    @property
    def volume_type(self):
        # Delegate to the full-resolution layer.
        return self.volume_layers[(1,) * self.dims].volume_type

    @property
    def token(self):
        # Delegate to the full-resolution layer.
        return self.volume_layers[(1,) * self.dims].token

    def info(self):
        """Build the info dict from the full-resolution layer, adding the
        maximum downsampling implied by the provided scales."""
        reference_layer = self.volume_layers[(1,) * self.dims]
        reference_info = reference_layer.info()

        info = {
            "dataType": reference_info["dataType"],
            "encoding": reference_info["encoding"],
            "generation": reference_info["generation"],
            "coordinateSpace": reference_info["coordinateSpace"],
            "shape": reference_info["shape"],
            "volumeType": reference_info["volumeType"],
            "voxelOffset": reference_info["voxelOffset"],
            "chunkLayout": reference_info["chunkLayout"],
            "downsamplingLayout": reference_info["downsamplingLayout"],
            # Total downsampling factor between coarsest and finest scales.
            "maxDownsampling": int(
                np.prod(
                    np.array(self.max_voxel_size)
                    // np.array(self.min_voxel_size)
                )
            ),
            "maxDownsampledSize": reference_info["maxDownsampledSize"],
            "maxDownsamplingScales": reference_info["maxDownsamplingScales"],
        }

        return info

    def get_encoded_subvolume(self, data_format, start, end, scale_key=None):
        """Serve the requested subvolume from the provided layer whose scale
        most closely matches the requested one without exceeding it."""
        if scale_key is None:
            scale_key = ",".join(("1",) * self.dims)

        scale = tuple(int(s) for s in scale_key.split(","))

        # Pick the provided layer that is at most as coarse as the request
        # along every dimension, with the smallest remaining gap.
        closest_scale = None
        min_diff = np.inf
        for volume_scales in self.volume_layers.keys():
            scale_diff = np.array(scale) // np.array(volume_scales)
            if any(scale_diff < 1):
                # This layer is coarser than the request along some axis.
                continue
            scale_diff = scale_diff.max()
            if scale_diff < min_diff:
                min_diff = scale_diff
                closest_scale = volume_scales

        assert closest_scale is not None
        # Remaining downsampling the chosen layer must apply itself.
        relative_scale = np.array(scale) // np.array(closest_scale)

        return self.volume_layers[closest_scale].get_encoded_subvolume(
            data_format,
            start,
            end,
            scale_key=",".join(map(str, relative_scale)),
        )

    def get_object_mesh(self, object_id):
        # Meshes come from the full-resolution layer only.
        return self.volume_layers[(1,) * self.dims].get_object_mesh(object_id)

    def invalidate(self):
        # Invalidate the full-resolution layer; viewers re-request data.
        return self.volume_layers[(1,) * self.dims].invalidate()

volume = ScalePyramid(layers)

else:
Expand Down Expand Up @@ -319,114 +431,10 @@ def parse_multiscale_metadata(data_path: str):
return scales, parsed


class ScalePyramid(neuroglancer.LocalVolume):
    """A neuroglancer layer that provides volume data on different scales.

    Mimics a ``LocalVolume``.
    From https://github.com/funkelab/funlib.show.neuroglancer/blob/master/funlib/show/neuroglancer/scale_pyramid.py

    Args:
    ----
    volume_layers (``list`` of ``LocalVolume``):

        One ``LocalVolume`` per provided resolution.
    """

    def __init__(self, volume_layers):
        # NOTE: deliberately skip LocalVolume.__init__ (this object wraps
        # several LocalVolumes rather than a single data array) and
        # initialize the next base class in the MRO instead.
        super(neuroglancer.LocalVolume, self).__init__()

        logger.debug("Creating scale pyramid...")

        # Finest (smallest) and coarsest (largest) voxel sizes provided.
        self.min_voxel_size = min(
            tuple(layer.dimensions.scales) for layer in volume_layers
        )
        self.max_voxel_size = max(
            tuple(layer.dimensions.scales) for layer in volume_layers
        )

        self.dims = len(volume_layers[0].dimensions.scales)

        # Index each layer by its integer downsampling factors relative to
        # the finest scale, e.g. (1, 1, 1) for the full-resolution layer.
        self.volume_layers = {
            tuple(
                int(x)
                for x in map(
                    operator.truediv, layer.dimensions.scales, self.min_voxel_size
                )
            ): layer
            for layer in volume_layers
        }

        logger.debug("min_voxel_size: %s", self.min_voxel_size)
        logger.debug("scale keys: %s", self.volume_layers.keys())
        logger.debug(self.info())

    @property
    def volume_type(self):
        # Delegate to the full-resolution layer.
        return self.volume_layers[(1,) * self.dims].volume_type

    @property
    def token(self):
        # Delegate to the full-resolution layer.
        return self.volume_layers[(1,) * self.dims].token

    def info(self):
        """Build the info dict from the full-resolution layer, adding the
        maximum downsampling implied by the provided scales."""
        reference_layer = self.volume_layers[(1,) * self.dims]
        reference_info = reference_layer.info()

        info = {
            "dataType": reference_info["dataType"],
            "encoding": reference_info["encoding"],
            "generation": reference_info["generation"],
            "coordinateSpace": reference_info["coordinateSpace"],
            "shape": reference_info["shape"],
            "volumeType": reference_info["volumeType"],
            "voxelOffset": reference_info["voxelOffset"],
            "chunkLayout": reference_info["chunkLayout"],
            "downsamplingLayout": reference_info["downsamplingLayout"],
            # Total downsampling factor between coarsest and finest scales.
            "maxDownsampling": int(
                np.prod(np.array(self.max_voxel_size) // np.array(self.min_voxel_size))
            ),
            "maxDownsampledSize": reference_info["maxDownsampledSize"],
            "maxDownsamplingScales": reference_info["maxDownsamplingScales"],
        }

        return info

    def get_encoded_subvolume(self, data_format, start, end, scale_key=None):
        """Serve the requested subvolume from the provided layer whose scale
        most closely matches the requested one without exceeding it."""
        if scale_key is None:
            scale_key = ",".join(("1",) * self.dims)

        scale = tuple(int(s) for s in scale_key.split(","))

        # Pick the provided layer that is at most as coarse as the request
        # along every dimension, with the smallest remaining gap.
        closest_scale = None
        min_diff = np.inf
        for volume_scales in self.volume_layers.keys():
            scale_diff = np.array(scale) // np.array(volume_scales)
            if any(scale_diff < 1):
                # This layer is coarser than the request along some axis.
                continue
            scale_diff = scale_diff.max()
            if scale_diff < min_diff:
                min_diff = scale_diff
                closest_scale = volume_scales

        assert closest_scale is not None
        # Remaining downsampling the chosen layer must apply itself.
        relative_scale = np.array(scale) // np.array(closest_scale)

        return self.volume_layers[closest_scale].get_encoded_subvolume(
            data_format,
            start,
            end,
            scale_key=",".join(map(str, relative_scale)),
        )

    def get_object_mesh(self, object_id):
        # Meshes come from the full-resolution layer only.
        return self.volume_layers[(1,) * self.dims].get_object_mesh(object_id)

    def invalidate(self):
        # Invalidate the full-resolution layer; viewers re-request data.
        return self.volume_layers[(1,) * self.dims].invalidate()


def open_ds_tensorstore(dataset_path: str, mode="r", concurrency_limit=None):
from tensorstore import d as ts_d
from tensorstore import open as ts_open

# open with zarr or n5 depending on extension
filetype = (
"zarr" if dataset_path.rfind(".zarr") > dataset_path.rfind(".n5") else "n5"
Expand Down
Loading