Coverage for openhcs/processing/backends/processors/percentile_utils.py: 5.9%

61 statements  

coverage.py v7.10.7, created at 2025-10-01 18:33 +0000

1""" 

2Shared utilities for percentile normalization across all backends. 

3 

4This module provides common functionality to ensure consistent behavior 

5between NumPy, CuPy, PyTorch, JAX, TensorFlow, and other implementations. 

6""" 

7 

8import numpy as np 

9from typing import Tuple, Any, Union 

10 

11 

def get_dtype_range(dtype) -> Tuple[Union[int, float], Union[int, float]]:
    """
    Get the natural min/max range for a numpy-compatible dtype.

    Args:
        dtype: NumPy dtype or equivalent (works with CuPy, PyTorch, etc.)

    Returns:
        Tuple of (min_value, max_value) for the dtype
    """
    # Convert to a numpy scalar type for consistent comparison
    if hasattr(dtype, 'type'):
        # Handle dtype objects (NumPy/CuPy np.dtype instances) that expose .type
        np_dtype = dtype.type
    else:
        # Handle scalar types or backend dtypes passed in directly
        np_dtype = dtype

    # Map dtypes to their natural ranges
    if np_dtype == np.uint8:
        return 0, 255
    elif np_dtype == np.uint16:
        return 0, 65535
    elif np_dtype == np.uint32:
        return 0, 4294967295
    elif np_dtype == np.uint64:
        return 0, 18446744073709551615
    elif np_dtype == np.int8:
        return -128, 127
    elif np_dtype == np.int16:
        return -32768, 32767
    elif np_dtype == np.int32:
        return -2147483648, 2147483647
    elif np_dtype == np.int64:
        return -9223372036854775808, 9223372036854775807
    elif np_dtype in (np.float16, np.float32, np.float64):
        # Floats are mapped to the unit interval rather than their full range
        return 0.0, 1.0
    else:
        # Fallback for unknown dtypes - assume a 16-bit unsigned range
        return 0, 65535
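
# Illustrative only (not part of the original module): the mapping above gives,
# for example,
#
#     >>> get_dtype_range(np.dtype(np.uint8))
#     (0, 255)
#     >>> get_dtype_range(np.dtype(np.float32))
#     (0.0, 1.0)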


def resolve_target_range(stack_dtype, target_min=None, target_max=None) -> Tuple[Union[int, float], Union[int, float]]:
    """
    Resolve target min/max values, auto-detecting from dtype if not specified.

    Args:
        stack_dtype: The dtype of the input stack
        target_min: Explicit target minimum (None for auto-detection)
        target_max: Explicit target maximum (None for auto-detection)

    Returns:
        Tuple of (resolved_min, resolved_max)
    """
    if target_min is None or target_max is None:
        auto_min, auto_max = get_dtype_range(stack_dtype)
        if target_min is None:
            target_min = auto_min
        if target_max is None:
            target_max = auto_max

    return target_min, target_max
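
# A hedged example of the resolution logic (not in the original module): with a
# uint8 stack, omitted bounds fall back to the dtype's natural range,
#
#     >>> resolve_target_range(np.dtype(np.uint8))
#     (0, 255)
#     >>> resolve_target_range(np.dtype(np.uint8), target_max=200)
#     (0, 200)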


def percentile_normalize_core(
    stack,
    low_percentile: float,
    high_percentile: float,
    target_min: Union[int, float],
    target_max: Union[int, float],
    percentile_func,
    clip_func,
    ones_like_func,
    preserve_dtype: bool = True
):
    """
    Core percentile normalization logic that works with any array backend.

    This function contains the shared algorithm while allowing different backends
    to provide their own array operations (percentile, clip, ones_like).

    Args:
        stack: Input array (NumPy, CuPy, PyTorch, etc.)
        low_percentile: Lower percentile (0-100)
        high_percentile: Upper percentile (0-100)
        target_min: Target minimum value
        target_max: Target maximum value
        percentile_func: Backend-specific percentile function
        clip_func: Backend-specific clip function
        ones_like_func: Backend-specific ones_like function
        preserve_dtype: Whether to preserve input dtype

    Returns:
        Normalized array with same backend as input
    """
    # Calculate global percentiles across the entire stack
    p_low = percentile_func(stack, low_percentile)
    p_high = percentile_func(stack, high_percentile)

    # Degenerate case: a constant stack would divide by zero below, so return
    # a constant array at the target minimum instead
    if p_high == p_low:
        result = ones_like_func(stack) * target_min
        if preserve_dtype:
            return result.astype(stack.dtype)
        # Legacy behavior: convert to uint16 for NumPy arrays, otherwise keep
        # the backend's own dtype (mirrors the non-degenerate path below)
        return result.astype(np.uint16 if 'numpy' in str(type(stack)) else stack.dtype)

    # Clip to the percentile window, then rescale linearly onto [target_min, target_max]
    clipped = clip_func(stack, p_low, p_high)
    normalized = (clipped - p_low) * (target_max - target_min) / (p_high - p_low) + target_min

    # Handle dtype conversion
    if preserve_dtype:
        return normalized.astype(stack.dtype)
    # Legacy behavior: convert to uint16-equivalent for the backend
    if hasattr(stack, 'dtype'):
        # Cast NumPy arrays to uint16; other dtype-aware backends keep their dtype
        return normalized.astype(np.uint16 if 'numpy' in str(type(stack)) else stack.dtype)
    # For backends without a dtype attribute, return the normalized array unchanged
    return normalized
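
# A minimal sketch (assuming NumPy; this wrapper name is illustrative and not
# part of openhcs) of how a backend wires its primitives into the core routine:

def _numpy_percentile_normalize_sketch(stack, low_percentile=1.0, high_percentile=99.0):
    """Hypothetical NumPy wrapper around percentile_normalize_core."""
    target_min, target_max = resolve_target_range(stack.dtype)
    return percentile_normalize_core(
        stack,
        low_percentile,
        high_percentile,
        target_min,
        target_max,
        percentile_func=np.percentile,  # global percentiles over the whole stack
        clip_func=np.clip,              # element-wise clamp to [p_low, p_high]
        ones_like_func=np.ones_like,
        preserve_dtype=True,
    )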


def slice_percentile_normalize_core(
    image,
    low_percentile: float,
    high_percentile: float,
    target_min: Union[int, float],
    target_max: Union[int, float],
    percentile_func,
    clip_func,
    ones_like_func,
    zeros_like_func,
    preserve_dtype: bool = True
):
    """
    Core slice-by-slice percentile normalization logic.

    Args:
        image: Input 3D array (Z, Y, X)
        low_percentile: Lower percentile (0-100)
        high_percentile: Upper percentile (0-100)
        target_min: Target minimum value
        target_max: Target maximum value
        percentile_func: Backend-specific percentile function
        clip_func: Backend-specific clip function
        ones_like_func: Backend-specific ones_like function
        zeros_like_func: Backend-specific zeros_like function
        preserve_dtype: Whether to preserve input dtype

    Returns:
        Normalized array with same backend as input
    """
    # Process each Z-slice independently.
    # Use float32 for intermediate calculations to avoid precision loss
    result = zeros_like_func(image, dtype=np.float32 if hasattr(image, 'dtype') else None)

    for z in range(image.shape[0]):
        # Get both percentile values for this slice in one call
        p_low, p_high = percentile_func(image[z], (low_percentile, high_percentile))

        # Avoid division by zero on constant slices
        if p_high == p_low:
            result[z] = ones_like_func(image[z]) * target_min
            continue

        # Clip to the percentile window, then rescale linearly onto [target_min, target_max]
        clipped = clip_func(image[z], p_low, p_high)
        normalized = (clipped - p_low) * (target_max - target_min) / (p_high - p_low) + target_min
        result[z] = normalized

    # Handle dtype conversion
    if preserve_dtype:
        return result.astype(image.dtype)
    # Legacy behavior: convert to uint16 when the backend supports casting
    if hasattr(result, 'astype'):
        return result.astype(np.uint16)
    return result
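
# Hypothetical end-to-end usage with NumPy (the data below is illustrative only;
# the real backend wrappers live elsewhere in openhcs and are not shown here):
#
#     >>> rng = np.random.default_rng(0)
#     >>> stack = rng.integers(0, 4096, size=(5, 64, 64), dtype=np.uint16)
#     >>> out = slice_percentile_normalize_core(
#     ...     stack, 1.0, 99.0, *resolve_target_range(stack.dtype),
#     ...     percentile_func=np.percentile, clip_func=np.clip,
#     ...     ones_like_func=np.ones_like, zeros_like_func=np.zeros_like,
#     ... )
#     >>> out.shape, out.dtype
#     ((5, 64, 64), dtype('uint16'))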