Coverage for openhcs/core/memory/utils.py: 10.3%
96 statements
1"""
2Memory conversion utility functions for OpenHCS.
4This module provides utility functions for memory conversion operations,
5supporting Clause 251 (Declarative Memory Conversion Interface) and
6Clause 65 (Fail Loudly).
7"""

import importlib
import logging
from typing import Any, Optional

from openhcs.constants.constants import MemoryType

from .exceptions import MemoryConversionError
from .framework_config import _FRAMEWORK_CONFIG

logger = logging.getLogger(__name__)


def _ensure_module(module_name: str) -> Any:
    """
    Ensure a module is imported and meets version requirements.

    Args:
        module_name: The name of the module to import

    Returns:
        The imported module

    Raises:
        ImportError: If the module cannot be imported or does not meet version requirements
        RuntimeError: If the module has known issues with specific versions
    """
    try:
        module = importlib.import_module(module_name)

        # Check TensorFlow version for DLPack compatibility
        if module_name == "tensorflow":
            import pkg_resources
            tf_version = pkg_resources.parse_version(module.__version__)
            min_version = pkg_resources.parse_version("2.12.0")

            if tf_version < min_version:
                raise RuntimeError(
                    f"TensorFlow version {module.__version__} is not supported for DLPack operations. "
                    f"Version 2.12.0 or higher is required for stable DLPack support. "
                    f"Clause 88 (No Inferred Capabilities) violation: Cannot infer DLPack capability."
                )

        return module
    except ImportError as e:
        raise ImportError(
            f"Module {module_name} is required for this operation but is not installed"
        ) from e
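
# Usage sketch (illustrative, not executed at import time; assumes numpy
# is installed — any importable module name behaves the same way):
#
#   np = _ensure_module("numpy")       # returns the imported module object
#   tf = _ensure_module("tensorflow")  # additionally enforces the 2.12+ check
#   _ensure_module("not_a_module")     # raises ImportError, per Clause 65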


def _supports_cuda_array_interface(obj: Any) -> bool:
    """
    Check if an object supports the CUDA Array Interface.

    Args:
        obj: The object to check

    Returns:
        True if the object supports the CUDA Array Interface, False otherwise
    """
    return hasattr(obj, "__cuda_array_interface__")
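
# Usage sketch (illustrative; assumes cupy and numpy are installed — CuPy
# GPU arrays expose __cuda_array_interface__, NumPy CPU arrays do not):
#
#   import cupy as cp
#   import numpy as np
#   _supports_cuda_array_interface(cp.zeros((2, 2)))  # True
#   _supports_cuda_array_interface(np.zeros((2, 2)))  # False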


def _supports_dlpack(obj: Any) -> bool:
    """
    Check if an object supports DLPack.

    Args:
        obj: The object to check

    Returns:
        True if the object supports DLPack, False otherwise

    Note:
        For TensorFlow tensors, this function enforces Clause 88 (No Inferred Capabilities)
        by explicitly checking:
        1. TensorFlow version must be 2.12+ for stable DLPack support
        2. Tensor must be on GPU (CPU tensors might succeed even without proper DLPack support)
        3. tf.experimental.dlpack module must exist
    """
    # Check for PyTorch, CuPy, or JAX DLPack support
    # PyTorch: __dlpack__ method, CuPy: toDlpack method, JAX: __dlpack__ method
    if hasattr(obj, "toDlpack") or hasattr(obj, "to_dlpack") or hasattr(obj, "__dlpack__"):
        # Special handling for TensorFlow to enforce Clause 88
        if 'tensorflow' in str(type(obj)):
            try:
                import tensorflow as tf

                # Check TensorFlow version - DLPack is only stable in TF 2.12+
                tf_version = tf.__version__
                major, minor = map(int, tf_version.split('.')[:2])

                if major < 2 or (major == 2 and minor < 12):
                    # Explicitly fail for TF < 2.12 to prevent silent fallbacks
                    raise RuntimeError(
                        f"TensorFlow version {tf_version} does not support stable DLPack operations. "
                        f"Version 2.12.0 or higher is required. "
                        f"Clause 88 violation: Cannot infer DLPack capability."
                    )

                # Check if tensor is on GPU - CPU tensors might succeed even without proper DLPack support
                device_str = obj.device.lower()
                if "gpu" not in device_str:
                    # Explicitly fail for CPU tensors to prevent deceptive behavior
                    raise RuntimeError(
                        "TensorFlow tensor on CPU cannot use DLPack operations reliably. "
                        "Only GPU tensors are supported for DLPack operations. "
                        "Clause 88 violation: Cannot infer GPU capability."
                    )

                # Check if experimental.dlpack module exists
                if not hasattr(tf.experimental, "dlpack"):
                    raise RuntimeError(
                        "TensorFlow installation missing experimental.dlpack module. "
                        "Clause 88 violation: Cannot infer DLPack capability."
                    )

                return True
            except (ImportError, AttributeError) as e:
                # Re-raise with more specific error message
                raise RuntimeError(
                    f"TensorFlow DLPack support check failed: {str(e)}. "
                    f"Clause 88 violation: Cannot infer DLPack capability."
                ) from e

        # For non-TensorFlow types, return True if they have DLPack methods
        return True

    return False
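
# Usage sketch (illustrative; assumes torch is installed — PyTorch tensors
# implement the __dlpack__ protocol, while plain Python objects do not):
#
#   import torch
#   _supports_dlpack(torch.zeros(3))  # True: tensor defines __dlpack__
#   _supports_dlpack([1, 2, 3])       # False: lists have no DLPack hooks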


# NOTE: Device operations are now defined in framework_config.py.
# This eliminates the scattered _DEVICE_OPS dict.


def _get_device_id(data: Any, memory_type: str) -> Optional[int]:
    """
    Get the GPU device ID from a data object using framework config.

    Args:
        data: The data object
        memory_type: The memory type

    Returns:
        The GPU device ID or None if not applicable

    Raises:
        MemoryConversionError: If the device ID cannot be determined for a GPU memory type
    """
    # Convert string to enum
    mem_type = MemoryType(memory_type)
    config = _FRAMEWORK_CONFIG[mem_type]
    get_id_handler = config['get_device_id']

    # Check if it's a callable handler (pyclesperanto)
    if callable(get_id_handler):
        mod = _ensure_module(mem_type.value)
        return get_id_handler(data, mod)

    # Check if it's None (CPU)
    if get_id_handler is None:
        return None

    # It's an eval expression
    try:
        mod = _ensure_module(mem_type.value)  # noqa: F841 (used in eval)
        return eval(get_id_handler)
    except Exception as e:  # AttributeError is already covered by Exception
        logger.warning(f"Failed to get device ID for {mem_type.value} array: {e}")
        # Try fallback if available; otherwise no device ID can be determined
        if 'get_device_id_fallback' in config:
            return eval(config['get_device_id_fallback'])
        return None
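
# Usage sketch (illustrative; assumes torch is installed and that the torch
# entry in _FRAMEWORK_CONFIG evaluates to the tensor's device index — the
# actual handler expressions live in framework_config.py):
#
#   import torch
#   t = torch.zeros(3, device="cuda:1")
#   _get_device_id(t, "torch")                 # -> 1
#   _get_device_id(some_numpy_array, "numpy")  # -> None (CPU memory type)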


def _set_device(memory_type: str, device_id: int) -> None:
    """
    Set the current device for a specific memory type using framework config.

    Args:
        memory_type: The memory type
        device_id: The GPU device ID

    Raises:
        MemoryConversionError: If the device cannot be set
    """
    # Convert string to enum
    mem_type = MemoryType(memory_type)
    config = _FRAMEWORK_CONFIG[mem_type]
    set_device_handler = config['set_device']

    # Check if it's a callable handler (pyclesperanto)
    if callable(set_device_handler):
        try:
            mod = _ensure_module(mem_type.value)
            set_device_handler(device_id, mod)
        except Exception as e:
            raise MemoryConversionError(
                source_type=memory_type,
                target_type=memory_type,
                method="device_selection",
                reason=f"Failed to set {mem_type.value} device to {device_id}: {e}"
            ) from e
        return

    # Check if it's None (frameworks that don't need global device setting)
    if set_device_handler is None:
        return

    # It's an eval expression
    try:
        mod = _ensure_module(mem_type.value)  # noqa: F841 (used in eval)
        eval(set_device_handler.format(mod='mod'))
    except Exception as e:
        raise MemoryConversionError(
            source_type=memory_type,
            target_type=memory_type,
            method="device_selection",
            reason=f"Failed to set {mem_type.value} device to {device_id}: {e}"
        ) from e
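
# Usage sketch (illustrative; assumes a CUDA-capable torch install and that
# the torch handler is an eval string wrapping something like
# torch.cuda.set_device — the exact expression lives in framework_config.py):
#
#   _set_device("torch", 0)  # subsequent torch allocations target GPU 0
#   _set_device("numpy", 0)  # no-op if numpy's set_device handler is None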


def _move_to_device(data: Any, memory_type: str, device_id: int) -> Any:
    """
    Move data to a specific GPU device using framework config.

    Args:
        data: The data to move
        memory_type: The memory type
        device_id: The target GPU device ID

    Returns:
        The data on the target device

    Raises:
        MemoryConversionError: If the data cannot be moved to the specified device
    """
    # Convert string to enum
    mem_type = MemoryType(memory_type)
    config = _FRAMEWORK_CONFIG[mem_type]
    move_handler = config['move_to_device']

    # Check if it's a callable handler (pyclesperanto)
    if callable(move_handler):
        try:
            mod = _ensure_module(mem_type.value)
            return move_handler(data, device_id, mod, memory_type)
        except Exception as e:
            raise MemoryConversionError(
                source_type=memory_type,
                target_type=memory_type,
                method="device_movement",
                reason=f"Failed to move {mem_type.value} array to device {device_id}: {e}"
            ) from e

    # Check if it's None (CPU memory types)
    if move_handler is None:
        return data

    # It's an eval expression
    try:
        mod = _ensure_module(mem_type.value)  # noqa: F841 (used in eval)

        # Handle context managers (CuPy, TensorFlow)
        if 'move_context' in config and config['move_context']:
            context_expr = config['move_context'].format(mod='mod')
            context = eval(context_expr)
            with context:
                return eval(move_handler.format(mod='mod'))
        else:
            return eval(move_handler.format(mod='mod'))
    except Exception as e:
        raise MemoryConversionError(
            source_type=memory_type,
            target_type=memory_type,
            method="device_movement",
            reason=f"Failed to move {mem_type.value} array to device {device_id}: {e}"
        ) from e
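
# Usage sketch (illustrative; assumes cupy is installed and that its config
# entry pairs a 'move_context' (e.g. a cupy.cuda.Device context manager)
# with a move expression — both defined in framework_config.py):
#
#   import cupy as cp
#   arr = cp.zeros((512, 512))             # lives on the current device
#   arr = _move_to_device(arr, "cupy", 1)  # re-allocated on GPU 1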