Skip to content

Commit fbc7fe6

Browse files
mccruz07 and bethac07 authored
Update runstardist.py (#205)
* Update runstardist.py. Potentially resolves issue #196 (stardist plugin does not free GPU memory). Now you can run RunStarDist and RunCellpose in the same pipeline. Tested on Windows; still needs testing on Macs. * Only run if CUDA is available. * Add optional GPU memory control code. --------- Co-authored-by: Beth Cimini <bethac07@users.noreply.github.com>
1 parent ab54863 commit fbc7fe6

1 file changed

Lines changed: 48 additions & 5 deletions

File tree

active_plugins/runstardist.py

Lines changed: 48 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@
88
from skimage.transform import resize
99
from stardist.models import StarDist2D, StarDist3D
1010
from csbdeep.utils import normalize
11+
from numba import cuda
12+
import tensorflow as tf
1113

1214
#################################
1315
#
@@ -206,6 +208,25 @@ def create_settings(self):
206208
maxval=1.0,
207209
doc=f"""\
208210
Prevent overlapping
211+
""",
212+
)
213+
214+
self.manage_gpu = Binary(
215+
text="Manually set how much GPU memory each worker can use?",
216+
value=False,
217+
doc="""
218+
If enabled, you can manually set how much of the GPU memory each worker can use.
219+
This is likely to provide the most benefit on Macs. Do not use in a multi-GPU setup.""",
220+
)
221+
222+
self.manual_GPU_memory_GB = Float(
223+
text="GPU memory (in GB) for each worker",
224+
value=0.5,
225+
minval=0.0000001,
226+
maxval=30,
227+
doc="""\
228+
GPU memory in GB available to each worker. Value should be set such that this number times the number
229+
of workers in each copy of CellProfiler times the number of copies of CellProfiler running (if applicable) is <1
209230
""",
210231
)
211232

@@ -224,6 +245,8 @@ def settings(self):
224245
self.model_choice3D,
225246
self.prob_thresh,
226247
self.nms_thresh,
248+
self.manage_gpu,
249+
self.manual_GPU_memory_GB,
227250
]
228251

229252
def visible_settings(self):
@@ -250,7 +273,10 @@ def visible_settings(self):
250273
if self.tile_image.value:
251274
vis_settings += [self.n_tiles_x, self.n_tiles_y]
252275

253-
vis_settings += [self.prob_thresh, self.nms_thresh, self.gpu_test]
276+
vis_settings += [self.prob_thresh, self.nms_thresh, self.gpu_test, self.manage_gpu]
277+
278+
if self.manage_gpu.value:
279+
vis_settings += [self.manual_GPU_memory_GB]
254280

255281
return vis_settings
256282

@@ -271,6 +297,23 @@ def run(self, workspace):
271297
raise ValueError(
272298
"Greyscale images are not supported by this model. Please provide a color overlay."
273299
)
300+
301+
# Stolen nearly wholesale from https://wiki.ncsa.illinois.edu/display/ISL20/Managing+GPU+memory+when+using+Tensorflow+and+Pytorch
302+
if self.manage_gpu.value:
303+
# First, Get a list of GPU devices
304+
gpus = tf.config.list_physical_devices('GPU')
305+
if len(gpus) > 0:
306+
# Restrict to only the first GPU.
307+
tf.config.set_visible_devices(gpus[:1], device_type='GPU')
308+
# Create a LogicalDevice with the appropriate memory limit
309+
log_dev_conf = tf.config.LogicalDeviceConfiguration(
310+
memory_limit=self.manual_GPU_memory_GB.value*1024 # 2 GB
311+
)
312+
# Apply the logical device configuration to the first GPU
313+
tf.config.set_logical_device_configuration(
314+
gpus[0],
315+
[log_dev_conf])
316+
274317

275318
if self.model.value == CUSTOM_MODEL:
276319
model_directory, model_name = os.path.split(
@@ -347,6 +390,8 @@ def run(self, workspace):
347390
objects.add_objects(y, self.y_name.value)
348391

349392
self.add_measurements(workspace)
393+
if cuda.is_available():
394+
cuda.current_context().memory_manager.deallocations.clear()
350395

351396
if self.show_window:
352397
workspace.display_data.x_data = x_data
@@ -389,11 +434,9 @@ def display(self, workspace, figure):
389434
)
390435

391436
def do_check_gpu(self):
392-
import tensorflow
393-
394-
if len(tensorflow.config.list_physical_devices("GPU")) > 0:
437+
if len(tf.config.list_physical_devices("GPU")) > 0:
395438
message = "GPU appears to be working correctly!"
396-
print("GPUs:", tensorflow.config.list_physical_devices("GPU"))
439+
print("GPUs:", tf.config.list_physical_devices("GPU"))
397440
else:
398441
message = (
399442
"GPU test failed. There may be something wrong with your configuration."

0 commit comments

Comments (0)