Coverage for openhcs/textual_tui/widgets/reactive_log_monitor.py: 0.0%
224 statements
« prev ^ index » next coverage.py v7.11.0, created at 2025-11-04 02:09 +0000
« prev ^ index » next coverage.py v7.11.0, created at 2025-11-04 02:09 +0000
1"""
2OpenHCS Reactive Log Monitor Widget
4A clean, low-entropy reactive log monitoring system that provides real-time
5log viewing with a dropdown selector interface.
7Mathematical properties:
8- Reactive: UI updates are pure functions of file system events
9- Monotonic: Logs only get added during execution
10- Deterministic: Same file system state always produces same UI
11"""
13import logging
14from pathlib import Path
15from typing import Set, Dict, List
17from textual.app import ComposeResult
18from textual.containers import Container
19from textual.widgets import Static, Select
20from textual.widget import Widget
21from textual.reactive import reactive
23# Import file system watching
24from watchdog.observers import Observer
25from watchdog.events import FileSystemEventHandler
27# Import core log utilities
28from openhcs.core.log_utils import LogFileInfo, discover_logs, classify_log_file, is_relevant_log_file
30# Toolong components are imported in ToolongWidget
32logger = logging.getLogger(__name__)
38class ReactiveLogFileHandler(FileSystemEventHandler):
39 """File system event handler for reactive log monitoring."""
41 def __init__(self, monitor: 'ReactiveLogMonitor'):
42 self.monitor = monitor
44 def on_created(self, event):
45 """Handle file creation events."""
46 if not event.is_directory and event.src_path.endswith('.log'):
47 file_path = Path(event.src_path)
48 if is_relevant_log_file(file_path, self.monitor.base_log_path):
49 logger.debug(f"Log file created: {file_path}")
50 self.monitor._handle_log_file_created(file_path)
52 def on_modified(self, event):
53 """Handle file modification events."""
54 if not event.is_directory and event.src_path.endswith('.log'):
55 file_path = Path(event.src_path)
56 if is_relevant_log_file(file_path, self.monitor.base_log_path):
57 self.monitor._handle_log_file_modified(file_path)
60class ReactiveLogMonitor(Widget):
61 """
62 Reactive log monitor with dropdown selector.
64 Provides real-time monitoring of OpenHCS log files with a clean dropdown
65 interface for selecting which log to view.
66 """
68 # Reactive properties
69 active_logs: reactive[Set[Path]] = reactive(set())
70 base_log_path: reactive[str] = reactive("")
72 def __init__(
73 self,
74 base_log_path: str = "",
75 auto_start: bool = True,
76 include_tui_log: bool = True,
77 **kwargs
78 ):
79 """
80 Initialize ReactiveLogMonitor.
82 Args:
83 base_log_path: Base path for subprocess log files
84 auto_start: Whether to automatically start monitoring when mounted
85 include_tui_log: Whether to include the current TUI process log
86 """
87 super().__init__(**kwargs)
88 self.base_log_path = base_log_path
89 self.auto_start = auto_start
90 self.include_tui_log = include_tui_log
92 # Internal state
93 self._log_info_cache: Dict[Path, LogFileInfo] = {}
94 # ToolongWidget will manage its own watcher
96 # File system watcher (will be set up in on_mount)
97 self._file_observer = None
99 def compose(self) -> ComposeResult:
100 """Compose the reactive log monitor layout with dropdown selector."""
101 # Simple layout like other widgets - no complex containers
102 yield Static("Log File:")
103 yield Select(
104 options=[("Loading...", "loading")],
105 value="loading",
106 id="log_selector",
107 compact=True
108 )
109 yield Container(id="log_view_container")
111 def on_mount(self) -> None:
112 """Set up log monitoring when widget is mounted."""
113 logger.debug(f"ReactiveLogMonitor.on_mount() called, auto_start={self.auto_start}")
114 if self.auto_start:
115 logger.debug("Starting monitoring from on_mount")
116 self.start_monitoring()
117 else:
118 logger.warning("Auto-start disabled, not starting monitoring")
120 def on_unmount(self) -> None:
121 """Clean up when widget is unmounted."""
122 logger.debug("ReactiveLogMonitor unmounting, cleaning up watchers...")
123 self.stop_monitoring()
125 def start_monitoring(self, base_log_path: str = None) -> None:
126 """
127 Start monitoring for log files.
129 Args:
130 base_log_path: Optional new base path to monitor
131 """
132 logger.debug(f"start_monitoring() called with base_log_path='{base_log_path}'")
134 if base_log_path:
135 self.base_log_path = base_log_path
137 logger.debug(f"Current state: base_log_path='{self.base_log_path}', include_tui_log={self.include_tui_log}")
139 # We can monitor even without base_log_path if include_tui_log is True
140 if not self.base_log_path and not self.include_tui_log:
141 raise RuntimeError("Cannot start log monitoring: no base log path and TUI log disabled")
143 if self.base_log_path:
144 logger.debug(f"Starting reactive log monitoring for subprocess: {self.base_log_path}")
145 else:
146 logger.debug("Starting reactive log monitoring for TUI log only")
148 # Discover existing logs - THIS SHOULD CRASH IF NO TUI LOG FOUND
149 logger.debug("About to discover existing logs...")
150 self._discover_existing_logs()
151 logger.debug("Finished discovering existing logs")
153 # Start file system watcher (only if we have subprocess logs to watch)
154 if self.base_log_path:
155 self._start_file_watcher()
157 logger.debug("Log monitoring started successfully")
159 def stop_monitoring(self) -> None:
160 """Stop all log monitoring with proper thread cleanup."""
161 logger.debug("Stopping reactive log monitoring")
163 try:
164 # Stop file system watcher first
165 self._stop_file_watcher()
166 except Exception as e:
167 logger.error(f"Error stopping file watcher: {e}")
169 # ToolongWidget now manages its own watcher, so no need to stop it here
170 logger.debug("ReactiveLogMonitor stopped (ToolongWidget manages its own watcher)")
172 # Clear state
173 self.active_logs = set()
174 self._log_info_cache.clear()
176 logger.debug("Reactive log monitoring stopped")
186 def _discover_existing_logs(self) -> None:
187 """Discover and add existing log files."""
188 discovered = discover_logs(self.base_log_path, self.include_tui_log)
189 for log_path in discovered:
190 self._add_log_file(log_path)
192 def _add_log_file(self, log_path: Path) -> None:
193 """Add a log file to monitoring (internal method)."""
194 if log_path in self.active_logs:
195 return # Already monitoring
197 # Classify the log file
198 log_info = classify_log_file(log_path, self.base_log_path, self.include_tui_log)
199 self._log_info_cache[log_path] = log_info
201 # Add to active logs (triggers reactive update)
202 new_logs = set(self.active_logs)
203 new_logs.add(log_path)
204 self.active_logs = new_logs
206 logger.debug(f"Added log file to monitoring: {log_info.display_name} ({log_path})")
208 def watch_active_logs(self, logs: Set[Path]) -> None:
209 """Reactive: Update dropdown when active logs change."""
210 logger.debug(f"Active logs changed: {len(logs)} logs")
211 # Always try to update - the _update_log_selector method has its own safety checks
212 logger.debug("Updating log selector")
213 self._update_log_selector()
215 def _update_log_selector(self) -> None:
216 """Update dropdown selector with available logs."""
217 logger.debug(f"_update_log_selector called, is_mounted={self.is_mounted}")
219 try:
220 # Check if the selector exists (might not be ready yet or removed during unmount)
221 try:
222 log_selector = self.query_one("#log_selector", Select)
223 logger.debug(f"Found log selector widget: {log_selector}")
224 except Exception as e:
225 logger.debug(f"Log selector not found (widget not ready or unmounting?): {e}")
226 return
227 logger.debug(f"Found log selector widget: {log_selector}")
229 # Sort logs: TUI first, then main subprocess, then workers by well ID
230 sorted_logs = self._sort_logs_for_display(self.active_logs)
231 logger.debug(f"Active logs: {[str(p) for p in self.active_logs]}")
232 logger.debug(f"Sorted logs: {[str(p) for p in sorted_logs]}")
234 # Build dropdown options
235 options = []
236 for log_path in sorted_logs:
237 log_info = self._log_info_cache.get(log_path)
238 logger.debug(f"Log path {log_path} -> log_info: {log_info}")
239 if log_info:
240 options.append((log_info.display_name, str(log_path)))
241 else:
242 logger.warning(f"No log_info found for {log_path} in cache: {list(self._log_info_cache.keys())}")
244 logger.debug(f"Built options: {options}")
246 if not options:
247 logger.error("CRITICAL: No options built! This should never happen with TUI log.")
248 options = [("No logs available", "none")]
250 # Update selector options
251 log_selector.set_options(options)
252 logger.debug(f"Set options on selector, current value: {log_selector.value}")
254 # Force refresh the selector
255 log_selector.refresh()
256 logger.debug("Forced selector refresh")
258 # Auto-select first option (TUI log) if nothing selected
259 if options and (log_selector.value == "loading" or log_selector.value not in [opt[1] for opt in options]):
260 logger.debug(f"Auto-selecting first option: {options[0]}")
261 log_selector.value = options[0][1]
262 logger.debug(f"About to show log file: {options[0][1]}")
263 self._show_log_file(Path(options[0][1]))
264 logger.debug(f"Finished showing log file: {options[0][1]}")
265 else:
266 logger.debug("Not auto-selecting, current selection is valid")
268 except Exception as e:
269 # FAIL LOUD - UI updates should not silently fail
270 raise RuntimeError(f"Failed to update log selector: {e}") from e
272 def on_select_changed(self, event: Select.Changed) -> None:
273 """Handle log file selection change."""
274 logger.debug(f"Select changed: value={event.value}, type={type(event.value)}")
276 # Handle NoSelection/BLANK - this should not happen if we always have TUI log
277 if event.value == Select.BLANK or event.value is None:
278 logger.error("CRITICAL: Select widget has no selection! This should never happen.")
279 return
281 # Handle valid selections
282 if event.value and event.value != "loading" and event.value != "none":
283 logger.debug(f"Showing log file: {event.value}")
284 self._show_log_file(Path(event.value))
285 else:
286 logger.warning(f"Ignoring invalid selection: {event.value}")
288 def _show_log_file(self, log_path: Path) -> None:
289 """Show the selected log file using proper Toolong structure."""
290 logger.debug(f"_show_log_file called with: {log_path}")
291 try:
292 log_container = self.query_one("#log_view_container", Container)
293 logger.debug(f"Found log container: {log_container}")
295 # Clear existing content
296 existing_widgets = log_container.query("*")
297 logger.debug(f"Clearing {len(existing_widgets)} existing widgets")
298 existing_widgets.remove()
300 # Create complete ToolongWidget - this encapsulates all Toolong functionality
301 from openhcs.textual_tui.widgets.toolong_widget import ToolongWidget
303 logger.debug(f"Creating ToolongWidget for: {log_path}")
305 # Create ToolongWidget for the selected file
306 toolong_widget = ToolongWidget.from_single_file(
307 str(log_path),
308 can_tail=True
309 )
310 logger.debug(f"Created ToolongWidget: {toolong_widget}")
312 # Mount the complete ToolongWidget
313 logger.debug("Mounting ToolongWidget to container")
314 log_container.mount(toolong_widget)
316 logger.debug(f"Successfully showing log file with ToolongWidget: {log_path}")
318 except Exception as e:
319 logger.error(f"Failed to show log file {log_path}: {e}", exc_info=True)
320 # Show error message
321 try:
322 log_container = self.query_one("#log_view_container", Container)
323 log_container.query("*").remove()
324 log_container.mount(Static(f"Error loading log: {e}", classes="error-message"))
325 logger.debug("Mounted error message")
326 except Exception as e2:
327 logger.error(f"Failed to show error message: {e2}")
329 def _sort_logs_for_display(self, logs: Set[Path]) -> List[Path]:
330 """Sort logs for display: TUI first, then main subprocess, then workers by well ID."""
331 tui_logs = []
332 main_logs = []
333 worker_logs = []
334 unknown_logs = []
336 for log_path in logs:
337 log_info = self._log_info_cache.get(log_path)
338 if not log_info:
339 unknown_logs.append(log_path)
340 continue
342 if log_info.log_type == "tui":
343 tui_logs.append(log_path)
344 elif log_info.log_type == "main":
345 main_logs.append(log_path)
346 elif log_info.log_type == "worker":
347 worker_logs.append((log_info.well_id or "", log_path))
348 else:
349 unknown_logs.append(log_path)
351 # Sort workers by well ID
352 worker_logs.sort(key=lambda x: x[0])
354 return tui_logs + main_logs + [log_path for _, log_path in worker_logs] + unknown_logs
356 def _start_file_watcher(self) -> None:
357 """Start file system watcher for new log files."""
358 if not self.base_log_path:
359 logger.warning("Cannot start file watcher: no base log path")
360 return
362 base_path = Path(self.base_log_path)
363 watch_directory = base_path.parent
365 if not watch_directory.exists():
366 logger.warning(f"Watch directory does not exist: {watch_directory}")
367 return
369 try:
370 # Stop any existing watcher
371 self._stop_file_watcher()
373 # Create new watcher as daemon thread
374 self._file_observer = Observer()
375 self._file_observer.daemon = True # Don't block app shutdown
377 # Create event handler
378 event_handler = ReactiveLogFileHandler(self)
380 # Schedule watching
381 self._file_observer.schedule(
382 event_handler,
383 str(watch_directory),
384 recursive=False # Only watch the log directory, not subdirectories
385 )
387 # Start watching
388 self._file_observer.start()
390 logger.debug(f"Started file system watcher for: {watch_directory}")
392 except Exception as e:
393 logger.error(f"Failed to start file system watcher: {e}")
394 self._file_observer = None
396 def _stop_file_watcher(self) -> None:
397 """Stop file system watcher with aggressive thread cleanup."""
398 if self._file_observer:
399 try:
400 logger.debug("Stopping file system observer...")
401 self._file_observer.stop()
403 # Wait for observer thread to finish with timeout
404 logger.debug("Waiting for file system observer thread to join...")
405 self._file_observer.join(timeout=0.5) # Shorter timeout
407 if self._file_observer.is_alive():
408 logger.warning("File system observer thread did not stop cleanly, forcing cleanup")
409 # Force cleanup by setting daemon flag
410 try:
411 for thread in self._file_observer._threads:
412 if hasattr(thread, 'daemon'):
413 thread.daemon = True
414 except:
415 pass
416 else:
417 logger.debug("File system observer stopped cleanly")
419 except Exception as e:
420 logger.error(f"Error stopping file system watcher: {e}")
421 finally:
422 self._file_observer = None
424 def _handle_log_file_created(self, file_path: Path) -> None:
425 """Handle creation of a new log file."""
426 self._add_log_file(file_path)
428 def _handle_log_file_modified(self, file_path: Path) -> None:
429 """Handle modification of an existing log file."""
430 # Toolong LogView handles live tailing automatically
431 pass