Coverage for openhcs/debug/example_export_clean.py: 0.0%
50 statements
« prev ^ index » next coverage.py v7.11.0, created at 2025-11-04 02:09 +0000
1#!/usr/bin/env python3
2"""
3OpenHCS Pipeline Script - Generated from tmpypa319tx.pkl
4Generated: 2025-07-21 13:37:01.538763
5"""
7import sys
8import os
10# Add OpenHCS to path
11sys.path.insert(0, "/home/ts/code/projects/openhcs")
13from openhcs.core.orchestrator.orchestrator import PipelineOrchestrator
14from openhcs.core.steps.function_step import FunctionStep
15from openhcs.core.config import (GlobalPipelineConfig, PathPlanningConfig, VFSConfig, ZarrConfig,
16 MaterializationBackend, ZarrCompressor, ZarrChunkStrategy)
17from openhcs.constants.constants import VariableComponents, Backend, Microscope
# Function imports
from openhcs.processing.backends.analysis.cell_counting_cpu import count_cells_single_channel
from openhcs.processing.backends.analysis.skan_axon_analysis import skan_axon_skeletonize_and_analyze
from openhcs.processing.backends.assemblers.assemble_stack_cupy import assemble_stack_cupy
from openhcs.processing.backends.pos_gen.ashlar_main_gpu import ashlar_compute_tile_positions_gpu
from openhcs.processing.backends.processors.cupy_processor import create_composite, stack_percentile_normalize, tophat
# NOTE(review): this import shadows the cupy_processor version of
# stack_percentile_normalize imported just above, so the torch implementation
# is the one the pipeline steps actually receive — confirm the exporter
# intended this, or drop one of the two imports.
from openhcs.processing.backends.processors.torch_processor import stack_percentile_normalize
def create_pipeline():
    """Build the exported pipeline.

    Returns:
        tuple: ``(plate_paths, pipeline_data, global_config)`` where
        ``plate_paths`` is the list of plate directories to process,
        ``pipeline_data`` maps each plate path to its ordered list of
        FunctionStep objects, and ``global_config`` is the
        GlobalPipelineConfig used for every plate.
    """
    plate = '/home/ts/nvme_usb/IMX/20250528-new-f04-analogs-n1-2-Plate-1_Plate_23318'

    # Global execution / storage configuration for the whole run.
    global_config = GlobalPipelineConfig(
        num_workers=5,
        path_planning=PathPlanningConfig(
            output_dir_suffix="_stitched",
            global_output_folder="/home/ts/nvme_usb/OpenHCS/",
            materialization_results_path="results",
        ),
        vfs=VFSConfig(
            intermediate_backend=Backend.MEMORY,
            materialization_backend=MaterializationBackend.ZARR,
        ),
        zarr=ZarrConfig(
            compressor=ZarrCompressor.ZSTD,
            compression_level=1,
            chunk_strategy=ZarrChunkStrategy.WELL,
        ),
        microscope=Microscope.AUTO,
        use_threading=None,
    )

    def preprocess_chain():
        # Normalize-then-tophat chain shared verbatim by the "preprocess1"
        # and "preprocess2" steps; returns a fresh list per call so the two
        # steps never share mutable state.
        # NOTE(review): parameter values are strings as emitted by the
        # script exporter — presumably coerced downstream; verify.
        return [
            (stack_percentile_normalize, {
                'low_percentile': '1.0',
                'high_percentile': '99.0',
                'target_max': '65535.0'
            }),
            (tophat, {
                'selem_radius': '50',
                'downsample_factor': '4'
            }),
        ]

    # Ordered steps for the single plate above.
    steps = [
        # Step 1: per-site normalization + background removal.
        FunctionStep(
            func=preprocess_chain(),
            name="preprocess1",
            variable_components=[VariableComponents.SITE],
            force_disk_output=False,
        ),
        # Step 2: merge channels into a composite.
        FunctionStep(
            func=[(create_composite, {})],
            name="composite",
            variable_components=[VariableComponents.CHANNEL],
            force_disk_output=False,
        ),
        # Step 3: Ashlar GPU tile-position estimation.
        FunctionStep(
            func=[
                (ashlar_compute_tile_positions_gpu, {
                    'overlap_ratio': '0.1',
                    'max_shift': '15.0',
                    'stitch_alpha': '0.2',
                    'upsample_factor': '10',
                    'permutation_upsample': '1',
                    'permutation_samples': '1000',
                    'min_permutation_samples': '10',
                    'max_permutation_tries': '100',
                    'window_size_factor': '0.1'
                })
            ],
            name="find_stitch_positions",
            variable_components=[VariableComponents.SITE],
            force_disk_output=False,
        ),
        # Step 4: same preprocessing chain again before assembly.
        FunctionStep(
            func=preprocess_chain(),
            name="preprocess2",
            variable_components=[VariableComponents.SITE],
            force_disk_output=False,
        ),
        # Step 5: stitch tiles into the final mosaic (persisted to disk).
        FunctionStep(
            func=[
                (assemble_stack_cupy, {
                    'blend_method': "'fixed'",
                    'fixed_margin_ratio': '0.1',
                    'overlap_blend_fraction': '1.0'
                })
            ],
            name="assemble",
            variable_components=[VariableComponents.SITE],
            force_disk_output=True,
        ),
        # Step 6: per-channel analysis — cell counting on channel '1',
        # axon skeleton analysis on channel '2'.
        FunctionStep(
            func={
                '1': [
                    (count_cells_single_channel, {
                        'min_sigma': '1.0',
                        'max_sigma': '10.0',
                        'num_sigma': '10',
                        'threshold': '0.1',
                        'overlap': '0.5',
                        'watershed_footprint_size': '3',
                        'watershed_min_distance': '5',
                        'gaussian_sigma': '1.0',
                        'median_disk_size': '1',
                        'min_cell_area': '30',
                        'max_cell_area': '200',
                        'detection_method': 'DetectionMethod.WATERSHED'
                    })
                ],
                '2': [
                    (skan_axon_skeletonize_and_analyze, {
                        'voxel_spacing': '(1.0, 1.0, 1.0)',
                        'min_object_size': '100',
                        'min_branch_length': '10.0',
                        'analysis_dimension': 'AnalysisDimension.TWO_D'
                    })
                ],
            },
            name="skan",
            variable_components=[VariableComponents.SITE],
            force_disk_output=False,
        ),
    ]

    plate_paths = [plate]
    pipeline_data = {plate: steps}
    return plate_paths, pipeline_data, global_config
def setup_signal_handlers():
    """Install SIGINT/SIGTERM handlers that hard-exit the whole process.

    The handler uses ``os._exit`` so no atexit hooks, ``finally`` blocks,
    or lingering worker threads can delay or block shutdown on Ctrl+C.
    """
    import signal
    import os

    def cleanup_and_exit(signum, frame):
        print(f"\n🔥 Signal {signum} received! Cleaning up all processes and threads...")
        # Bypass normal interpreter teardown entirely.
        os._exit(1)

    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, cleanup_and_exit)
def run_pipeline():
    """Compile and execute the exported pipeline on every plate.

    Sets OPENHCS_SUBPROCESS_MODE, initializes the global GPU registry from
    the generated config, then runs each plate through a fresh
    PipelineOrchestrator (initialize → compile → execute).
    """
    os.environ["OPENHCS_SUBPROCESS_MODE"] = "1"

    plate_paths, pipeline_data, global_config = create_pipeline()

    # Imported lazily so GPU setup only happens when actually running.
    from openhcs.core.orchestrator.gpu_scheduler import setup_global_gpu_registry
    setup_global_gpu_registry(global_config=global_config)

    for plate_path in plate_paths:
        pipeline = pipeline_data[plate_path]
        orchestrator = PipelineOrchestrator(plate_path)
        orchestrator.initialize()
        contexts = orchestrator.compile_pipelines(pipeline)
        orchestrator.execute_compiled_plate(
            pipeline_definition=pipeline,
            compiled_contexts=contexts,
            max_workers=global_config.num_workers,
        )
# Script entry point: install the kill-everything signal handlers first so
# Ctrl+C works during the run, then execute the exported pipeline.
if __name__ == "__main__":
    setup_signal_handlers()
    run_pipeline()