Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 6 additions & 3 deletions cellpose/contrib/distributed_segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -733,7 +733,8 @@ class in this module. If you are running on the Janelia LSF cluster, see

temp_zarr_path = temporary_directory + '/segmentation_unstitched.zarr'
temp_zarr = zarr.open(
temp_zarr_path, 'w',
store=temp_zarr_path,
mode = 'w',
shape=input_zarr.shape,
chunks=blocksize,
dtype=np.uint32,
Expand Down Expand Up @@ -773,8 +774,8 @@ class in this module. If you are running on the Janelia LSF cluster, see
ncpus=1,
memory="15GB",
mem=int(15e9),
queue=None,
job_extra_directives=[],
queue=cluster.locals_store['kwargs']['queue'],
job_extra_directives=cluster.locals_store['kwargs']['job_extra_directives'],
)

segmentation_da = dask.array.from_zarr(temp_zarr)
Expand Down Expand Up @@ -886,6 +887,8 @@ def block_face_adjacency_graph(faces, nlabels):
face = np.concatenate((a, b), axis=np.argmin(a.shape))
mapped = dask_image.ndmeasure._utils._label._across_block_label_grouping(face, structure)
all_mappings.append(mapped)
if not all_mappings:
return scipy.sparse.coo_matrix((nlabels+1, nlabels+1)).tocsr()
i, j = np.concatenate(all_mappings, axis=1)
v = np.ones_like(i)
return scipy.sparse.coo_matrix((v, (i, j)), shape=(nlabels+1, nlabels+1)).tocsr()
Expand Down
10 changes: 6 additions & 4 deletions cellpose/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ def eval(self, x, batch_size=8, resample=True, channels=None, channel_axis=None,
flow_threshold (float, optional): flow error threshold (all cells with errors below threshold are kept) (not used for 3D). Defaults to 0.4.
cellprob_threshold (float, optional): all pixels with value above threshold kept for masks, decrease to find more and larger masks. Defaults to 0.0.
do_3D (bool, optional): set to True to run 3D segmentation on 3D/4D image input. Defaults to False.
flow3D_smooth (int, optional): if do_3D and flow3D_smooth>0, smooth flows with gaussian filter of this stddev. Defaults to 0.
flow3D_smooth (int or list[int], optional): if do_3D and flow3D_smooth>0, smooth flows with gaussian filter of this stddev. Defaults to 0.
anisotropy (float, optional): for 3D segmentation, optional rescaling factor (e.g. set to 2.0 if Z is sampled half as dense as X or Y). Defaults to None.
stitch_threshold (float, optional): if stitch_threshold>0.0 and not do_3D, masks are stitched in 3D to return volume segmentation. Defaults to 0.0.
min_size (int, optional): all ROIs below this size, in pixels, will be discarded. Defaults to 15.
Expand Down Expand Up @@ -319,10 +319,12 @@ def eval(self, x, batch_size=8, resample=True, channels=None, channel_axis=None,
do_3D=do_3D,
anisotropy=anisotropy)

if do_3D:
if flow3D_smooth > 0:
if do_3D:
if isinstance(flow3D_smooth, int):
flow3D_smooth = [flow3D_smooth]*3
if any(v > 0 for v in flow3D_smooth):
models_logger.info(f"smoothing flows with sigma={flow3D_smooth}")
dP = gaussian_filter(dP, (0, flow3D_smooth, flow3D_smooth, flow3D_smooth))
dP = gaussian_filter(dP, [0, *flow3D_smooth])
torch.cuda.empty_cache()
gc.collect()

Expand Down