Coverage for openhcs/debug/example_export_clean.py: 0.0%

52 statements  

coverage.py v7.10.3, created at 2025-08-14 05:57 +0000

#!/usr/bin/env python3
"""
OpenHCS Pipeline Script - Generated from tmpypa319tx.pkl
Generated: 2025-07-21 13:37:01.538763
"""

import sys
import os
from pathlib import Path

# Add OpenHCS to path
sys.path.insert(0, "/home/ts/code/projects/openhcs")

from openhcs.core.orchestrator.orchestrator import PipelineOrchestrator
from openhcs.core.steps.function_step import FunctionStep
from openhcs.core.config import (GlobalPipelineConfig, PathPlanningConfig, VFSConfig, ZarrConfig,
                                 MaterializationBackend, ZarrCompressor, ZarrChunkStrategy)
from openhcs.constants.constants import VariableComponents, Backend, Microscope

# Function imports
from openhcs.processing.backends.analysis.cell_counting_cpu import count_cells_single_channel
from openhcs.processing.backends.analysis.skan_axon_analysis import skan_axon_skeletonize_and_analyze
from openhcs.processing.backends.assemblers.assemble_stack_cupy import assemble_stack_cupy
from openhcs.processing.backends.pos_gen.ashlar_main_gpu import ashlar_compute_tile_positions_gpu
from openhcs.processing.backends.processors.cupy_processor import create_composite, stack_percentile_normalize, tophat
from openhcs.processing.backends.processors.torch_processor import stack_percentile_normalize
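# NOTE: the torch_processor import directly above re-binds stack_percentile_normalize,
# shadowing the CuPy version imported just before it; the normalization steps below
# therefore call the Torch implementation. The CuPy/Torch processors and the GPU Ashlar
# position finder presumably require a CUDA-capable device, while the cell-counting and
# skan modules are CPU-based (per their module names).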

def create_pipeline():
    """Create and return the pipeline configuration."""

    # Plate paths
    plate_paths = ['/home/ts/nvme_usb/IMX/20250528-new-f04-analogs-n1-2-Plate-1_Plate_23318']

    # Global configuration
    global_config = GlobalPipelineConfig(
        num_workers=5,
        path_planning=PathPlanningConfig(
            output_dir_suffix="_stitched",
            global_output_folder="/home/ts/nvme_usb/OpenHCS/",
            materialization_results_path="results"
        ),
        vfs=VFSConfig(
            intermediate_backend=Backend.MEMORY,
            materialization_backend=MaterializationBackend.ZARR
        ),
        zarr=ZarrConfig(
            store_name="images.zarr",
            compressor=ZarrCompressor.ZSTD,
            compression_level=1,
            shuffle=True,
            chunk_strategy=ZarrChunkStrategy.SINGLE,
            ome_zarr_metadata=True,
            write_plate_metadata=True
        ),
        microscope=Microscope.AUTO,
        use_threading=None
    )
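    # Intermediate step outputs stay in the in-memory VFS backend; only materialized
    # outputs are written out, to an OME-Zarr store ("images.zarr", zstd level 1 with
    # byte shuffle, single-chunk strategy) under the global output folder with the
    # "_stitched" suffix. Microscope format is auto-detected.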

    # Pipeline steps
    pipeline_data = {}

    # Steps for plate: 20250528-new-f04-analogs-n1-2-Plate-1_Plate_23318
    steps = []

    # Step 1: preprocess1
    step_1 = FunctionStep(
        func=[
            (stack_percentile_normalize, {
                'low_percentile': 1.0,
                'high_percentile': 99.0,
                'target_max': 65535.0
            }),
            (tophat, {
                'selem_radius': 50,
                'downsample_factor': 4
            })
        ],
        name="preprocess1",
        variable_components=[VariableComponents.SITE],
        force_disk_output=False
    )
    steps.append(step_1)
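    # Step 1 rescales each site's stack to its 1st-99th percentile range (scaled to a
    # 65535 maximum) and removes coarse background with a top-hat filter
    # (selem_radius=50, downsample_factor=4).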

    # Step 2: composite
    step_2 = FunctionStep(
        func=[
            (create_composite, {})
        ],
        name="composite",
        variable_components=[VariableComponents.CHANNEL],
        force_disk_output=False
    )
    steps.append(step_2)
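    # Step 2 varies over CHANNEL and builds a composite across channels, presumably to
    # give the position-finding step below a single registration image per tile.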

    # Step 3: find_stitch_positions
    step_3 = FunctionStep(
        func=[
            (ashlar_compute_tile_positions_gpu, {
                'overlap_ratio': 0.1,
                'max_shift': 15.0,
                'stitch_alpha': 0.2,
                'upsample_factor': 10,
                'permutation_upsample': 1,
                'permutation_samples': 1000,
                'min_permutation_samples': 10,
                'max_permutation_tries': 100,
                'window_size_factor': 0.1
            })
        ],
        name="find_stitch_positions",
        variable_components=[VariableComponents.SITE],
        force_disk_output=False
    )
    steps.append(step_3)
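    # Step 3 runs Ashlar-style tile registration on the GPU, assuming roughly 10% overlap
    # between neighbouring tiles (overlap_ratio=0.1) and capping the allowed displacement
    # at max_shift=15.0.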

    # Step 4: preprocess2
    step_4 = FunctionStep(
        func=[
            (stack_percentile_normalize, {
                'low_percentile': 1.0,
                'high_percentile': 99.0,
                'target_max': 65535.0
            }),
            (tophat, {
                'selem_radius': 50,
                'downsample_factor': 4
            })
        ],
        name="preprocess2",
        variable_components=[VariableComponents.SITE],
        force_disk_output=False
    )
    steps.append(step_4)
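    # Step 4 repeats the preprocess1 normalization/top-hat chain with identical
    # parameters ahead of assembly.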

    # Step 5: assemble
    step_5 = FunctionStep(
        func=[
            (assemble_stack_cupy, {
                'blend_method': 'fixed',
                'fixed_margin_ratio': 0.1,
                'overlap_blend_fraction': 1.0
            })
        ],
        name="assemble",
        variable_components=[VariableComponents.SITE],
        force_disk_output=True
    )
    steps.append(step_5)
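    # Step 5 assembles each site's tiles into the stitched mosaic with CuPy using
    # fixed-margin blending; force_disk_output=True flags this step's output for
    # materialization to the Zarr backend configured above rather than the in-memory
    # intermediate store.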

    # Step 6: skan
    step_6 = FunctionStep(
        func={
            '1': [
                (count_cells_single_channel, {
                    'min_sigma': 1.0,
                    'max_sigma': 10.0,
                    'num_sigma': 10,
                    'threshold': 0.1,
                    'overlap': 0.5,
                    'watershed_footprint_size': 3,
                    'watershed_min_distance': 5,
                    'gaussian_sigma': 1.0,
                    'median_disk_size': 1,
                    'min_cell_area': 30,
                    'max_cell_area': 200,
                    'detection_method': 'DetectionMethod.WATERSHED'  # presumably refers to the DetectionMethod.WATERSHED enum member
                })
            ],
            '2': [
                (skan_axon_skeletonize_and_analyze, {
                    'voxel_spacing': (1.0, 1.0, 1.0),
                    'min_object_size': 100,
                    'min_branch_length': 10.0,
                    'analysis_dimension': 'AnalysisDimension.TWO_D'  # presumably refers to the AnalysisDimension.TWO_D enum member
                })
            ]
        },
        name="skan",
        variable_components=[VariableComponents.SITE],
        force_disk_output=False
    )
    steps.append(step_6)
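    # Step 6's dict-form func maps component values (presumably channels) to different
    # function chains: '1' goes through blob/watershed cell counting, '2' through
    # skan-based axon skeletonization and branch analysis.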

    pipeline_data["/home/ts/nvme_usb/IMX/20250528-new-f04-analogs-n1-2-Plate-1_Plate_23318"] = steps

    return plate_paths, pipeline_data, global_config

def setup_signal_handlers():
    """Set up signal handlers to kill all child processes and threads on Ctrl+C."""
    import signal
    import os

    def cleanup_and_exit(signum, frame):
        print(f"\n🔥 Signal {signum} received! Cleaning up all processes and threads...")

        os._exit(1)

    signal.signal(signal.SIGINT, cleanup_and_exit)
    signal.signal(signal.SIGTERM, cleanup_and_exit)
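# os._exit(1) terminates the interpreter immediately, without running atexit handlers or
# waiting for worker processes/threads, which is the intent here: abandon any in-flight
# pipeline work as soon as SIGINT/SIGTERM arrives.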

def run_pipeline():
    os.environ["OPENHCS_SUBPROCESS_MODE"] = "1"
    plate_paths, pipeline_data, global_config = create_pipeline()
    from openhcs.core.orchestrator.gpu_scheduler import setup_global_gpu_registry
    setup_global_gpu_registry(global_config=global_config)
    for plate_path in plate_paths:
        orchestrator = PipelineOrchestrator(plate_path, global_config=global_config)
        orchestrator.initialize()
        compiled_contexts = orchestrator.compile_pipelines(pipeline_data[plate_path])
        orchestrator.execute_compiled_plate(
            pipeline_definition=pipeline_data[plate_path],
            compiled_contexts=compiled_contexts,
            max_workers=global_config.num_workers
        )

if __name__ == "__main__":
    setup_signal_handlers()
    run_pipeline()
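# Usage note: the plate path, the pipeline_data key, the global_output_folder, and the
# sys.path.insert() repository path are hard-coded above and need editing for another
# machine or plate. With those adjusted, the export runs standalone:
#
#     python example_export_clean.py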