Final repository
@@ -1,9 +1,9 @@
from PySide6.QtWidgets import (
    QMainWindow, QTabWidget, QDockWidget, QMessageBox,
    QApplication, QFileDialog, QSplashScreen, QVBoxLayout, QWidget
    QApplication, QFileDialog, QSplashScreen, QVBoxLayout, QWidget, QLabel
)
from PySide6.QtCore import Qt, QTimer, QSettings, QSize, Slot
from PySide6.QtGui import QIcon, QPixmap, QAction
from PySide6.QtGui import QIcon, QPixmap, QAction, QFont

import os
import sys
@@ -24,19 +24,26 @@ if hasattr(Qt, 'qInstallMessageHandler'):
from ui.analytics_tab import AnalyticsTab
from ui.violations_tab import ViolationsTab
from ui.export_tab import ExportTab
from ui.config_panel import ConfigPanel
from ui.live_multi_cam_tab import LiveMultiCamTab
from ui.video_detection_tab import VideoDetectionTab
from ui.modern_config_panel import ModernConfigPanel
from ui.modern_live_detection_tab import ModernLiveDetectionTab
# from ui.video_analysis_tab import VideoAnalysisTab
# from ui.video_detection_tab import VideoDetectionTab # Commented out - split into two separate tabs
from ui.video_detection_only_tab import VideoDetectionOnlyTab
from ui.smart_intersection_tab import SmartIntersectionTab
from ui.global_status_panel import GlobalStatusPanel
from ui.vlm_insights_widget import VLMInsightsWidget # Import the new VLM Insights Widget
from ui.dashboard_tab import DashboardTab # Import the new Dashboard Tab

# Import controllers
from controllers.video_controller_new import VideoController
from controllers.analytics_controller import AnalyticsController
from controllers.performance_overlay import PerformanceOverlay
from controllers.model_manager import ModelManager
# VLM Controller removed - functionality moved to insights widget

# Import utilities
from utils.helpers import load_configuration, save_configuration, save_snapshot
from utils.data_publisher import DataPublisher

class MainWindow(QMainWindow):
    """Main application window."""
@@ -58,6 +65,9 @@ class MainWindow(QMainWindow):
        # Connect signals and slots
        self.connectSignals()

        # Initialize config panel with current configuration
        self.config_panel.set_config(self.config)

        # Restore settings
        self.restoreSettings()
@@ -70,49 +80,134 @@
    def setupUI(self):
        """Set up the user interface"""
        # Window properties
        self.setWindowTitle("Traffic Monitoring System (OpenVINO PySide6)")
        self.setWindowTitle("Traffic Intersection Monitoring System")
        self.setMinimumSize(1200, 800)
        self.resize(1400, 900)

        # Set up central widget with tabs
        self.tabs = QTabWidget()

        # Style the tabs
        self.tabs.setStyleSheet("""
            QTabWidget::pane {
                border: 1px solid #444;
                background-color: #2b2b2b;
            }
            QTabBar::tab {
                background-color: #3c3c3c;
                color: white;
                padding: 8px 16px;
                margin: 2px;
                border: 1px solid #555;
                border-bottom: none;
                border-radius: 4px 4px 0px 0px;
                min-width: 120px;
            }
            QTabBar::tab:selected {
                background-color: #0078d4;
                border-color: #0078d4;
            }
            QTabBar::tab:hover {
                background-color: #4a4a4a;
            }
            QTabBar::tab:!selected {
                margin-top: 2px;
            }
        """)

        # Create tabs
        self.live_tab = LiveMultiCamTab()
        self.video_detection_tab = VideoDetectionTab()
        self.live_tab = ModernLiveDetectionTab()
        # self.video_analysis_tab = VideoAnalysisTab()
        # self.video_detection_tab = VideoDetectionTab() # Commented out - split into two separate tabs
        self.video_detection_only_tab = VideoDetectionOnlyTab()
        self.smart_intersection_tab = SmartIntersectionTab()
        self.analytics_tab = AnalyticsTab()
        self.violations_tab = ViolationsTab()
        self.export_tab = ExportTab()
        # Remove VLM tab - VLM functionality moved to settings panel
        # self.vlm_tab = VLMTab() # Create the VLM tab
        from ui.performance_graphs import PerformanceGraphsWidget
        self.performance_tab = PerformanceGraphsWidget()

        # Add Dashboard tab
        try:
            self.dashboard_tab = DashboardTab()
        except Exception as e:
            print(f"Warning: Could not create Dashboard tab: {e}")
            self.dashboard_tab = None

        # Add User Guide tab
        try:
            from ui.user_guide_tab import UserGuideTab
            self.user_guide_tab = UserGuideTab()
        except Exception as e:
            print(f"Warning: Could not create User Guide tab: {e}")
            self.user_guide_tab = None

        # Add tabs to tab widget
        self.tabs.addTab(self.live_tab, "Live Detection")
        self.tabs.addTab(self.video_detection_tab, "Video Detection")
        self.tabs.addTab(self.performance_tab, "🔥 Performance & Latency")
        # self.tabs.addTab(self.video_analysis_tab, "Video Analysis")
        # self.tabs.addTab(self.video_detection_tab, "Smart Intersection") # Commented out - split into two tabs
        self.tabs.addTab(self.video_detection_only_tab, "Video Detection")
        # self.tabs.addTab(self.smart_intersection_tab, "Smart Intersection") # Temporarily hidden
        if self.dashboard_tab:
            self.tabs.addTab(self.dashboard_tab, "Dashboard")
        self.tabs.addTab(self.performance_tab, "Performance & Latency")
        self.tabs.addTab(self.analytics_tab, "Analytics")
        self.tabs.addTab(self.violations_tab, "Violations")
        # VLM functionality moved to settings panel
        # self.tabs.addTab(self.vlm_tab, "🔍 Vision AI") # Add VLM tab with icon
        self.tabs.addTab(self.export_tab, "Export & Config")

        # Add User Guide tab if available
        if self.user_guide_tab:
            self.tabs.addTab(self.user_guide_tab, "Help")

        # Create config panel in dock widget
        self.config_panel = ConfigPanel()
        self.config_panel = ModernConfigPanel()
        dock = QDockWidget("Settings", self)
        dock.setObjectName("SettingsDock") # Set object name to avoid warning
        dock.setWidget(self.config_panel)
        dock.setFeatures(QDockWidget.DockWidgetMovable | QDockWidget.DockWidgetClosable)
        dock.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)

        # Set minimum and preferred size for the dock widget
        dock.setMinimumWidth(400)
        dock.resize(450, 800) # Set preferred width and height

        self.addDockWidget(Qt.RightDockWidgetArea, dock)

        # Create status bar
        self.statusBar().showMessage("Initializing...")

        # Create main layout with header
        main_layout = QVBoxLayout()

        # Add header title above tabs
        header_label = QLabel("Traffic Intersection Monitoring System")
        header_label.setAlignment(Qt.AlignCenter)
        header_font = QFont()
        header_font.setPointSize(14)
        header_font.setBold(True)
        header_label.setFont(header_font)
        header_label.setStyleSheet("""
            QLabel {
                color: #ffffff;
                background-color: #2b2b2b;
                padding: 10px;
                border-bottom: 2px solid #0078d4;
                margin-bottom: 5px;
            }
        """)
        main_layout.addWidget(header_label)

        main_layout.addWidget(self.tabs)
        central = QWidget()
        central.setLayout(main_layout)
        self.setCentralWidget(central)

        # Create menu bar
        self.setupMenus()
        # Create menu bar - commented out for cleaner interface
        # self.setupMenus()

        # Create performance overlay
        self.performance_overlay = PerformanceOverlay()
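# Illustrative sketch (not part of this commit): dock.setObjectName("SettingsDock") above matters
# because QMainWindow.saveState()/restoreState() key dock and toolbar layout by objectName and warn
# about unnamed docks. The restoreSettings() call in __init__ typically pairs with a closeEvent()
# along these lines; the QSettings organization/application names here are assumptions, not taken
# from this repository.
from PySide6.QtCore import QSettings
from PySide6.QtWidgets import QMainWindow

class _SettingsPersistenceSketch(QMainWindow):
    def restoreSettings(self):
        settings = QSettings("TrafficMonitoring", "MainWindow")  # hypothetical org/app names
        geometry = settings.value("geometry")
        state = settings.value("windowState")
        if geometry is not None:
            self.restoreGeometry(geometry)  # window size and position
        if state is not None:
            self.restoreState(state)        # dock/toolbar layout, keyed by objectName

    def closeEvent(self, event):
        settings = QSettings("TrafficMonitoring", "MainWindow")
        settings.setValue("geometry", self.saveGeometry())
        settings.setValue("windowState", self.saveState())
        super().closeEvent(event)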
@@ -131,6 +226,17 @@

            # Create analytics controller
            self.analytics_controller = AnalyticsController()

            # Initialize data publisher for InfluxDB
            print("[MAIN WINDOW DEBUG] Initializing Data Publisher...")
            self.data_publisher = DataPublisher(self.config_file)
            print("[MAIN WINDOW DEBUG] Data Publisher initialized successfully")

            # VLM controller - using only local VLM folder, no backend
            print("[MAIN WINDOW DEBUG] Initializing VLM Controller with local VLM folder...")
            from controllers.vlm_controller_new import VLMController
            self.vlm_controller = VLMController() # No backend URL needed
            print("[MAIN WINDOW DEBUG] VLM Controller initialized successfully")

            # Setup update timer for performance overlay
            self.perf_timer = QTimer()
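# Illustrative sketch (not part of this commit): utils/data_publisher.py is not shown in this diff,
# so the shape below is a hypothetical minimal publisher built on the influxdb-client package,
# matching the publish_* calls MainWindow makes. The URL, token, org, and bucket handling are
# assumptions for illustration only; the real DataPublisher reads them from the config file.
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS

class DataPublisherSketch:
    def __init__(self, url="http://localhost:8086", token="dev-token", org="traffic", bucket="traffic"):
        self._client = InfluxDBClient(url=url, token=token, org=org)
        self._write_api = self._client.write_api(write_options=SYNCHRONOUS)
        self._org = org
        self._bucket = bucket

    def publish_performance_data(self, fps, inference_time, cpu_usage=None, gpu_usage=None):
        # One point per stats update; None fields are simply omitted.
        point = Point("performance").field("fps", float(fps)).field("inference_time_ms", float(inference_time))
        if cpu_usage is not None:
            point = point.field("cpu_usage", float(cpu_usage))
        if gpu_usage is not None:
            point = point.field("gpu_usage", float(gpu_usage))
        self._write_api.write(bucket=self._bucket, org=self._org, record=point)

    def publish_traffic_light_status(self, color, confidence):
        point = Point("traffic_light").tag("color", str(color)).field("confidence", float(confidence))
        self._write_api.write(bucket=self._bucket, org=self._org, record=point)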
@@ -138,11 +244,56 @@
            self.perf_timer.start(1000) # Update every second

            # Connect video_file_controller outputs to video_detection_tab
            self.video_file_controller.frame_ready.connect(self.video_detection_tab.update_display, Qt.QueuedConnection)
            self.video_file_controller.stats_ready.connect(self.video_detection_tab.update_stats, Qt.QueuedConnection)
            self.video_file_controller.progress_ready.connect(lambda value, max_value, timestamp: self.video_detection_tab.update_progress(value, max_value, timestamp), Qt.QueuedConnection)
            # Connect video file controller signals to both video tabs
            self.video_file_controller.frame_ready.connect(self.video_detection_only_tab.update_display, Qt.QueuedConnection)
            self.video_file_controller.stats_ready.connect(self.video_detection_only_tab.update_stats, Qt.QueuedConnection)
            self.video_file_controller.progress_ready.connect(lambda value, max_value, timestamp: self.video_detection_only_tab.update_progress(value, max_value, timestamp), Qt.QueuedConnection)

            self.video_file_controller.frame_ready.connect(self.smart_intersection_tab.update_display, Qt.QueuedConnection)
            self.video_file_controller.stats_ready.connect(self.smart_intersection_tab.update_stats, Qt.QueuedConnection)
            self.video_file_controller.progress_ready.connect(lambda value, max_value, timestamp: self.smart_intersection_tab.update_progress(value, max_value, timestamp), Qt.QueuedConnection)

            # Connect video frames to VLM insights for analysis
            if hasattr(self.video_file_controller, 'raw_frame_ready'):
                print("[MAIN WINDOW DEBUG] Connecting raw_frame_ready signal to VLM insights")
                self.video_file_controller.raw_frame_ready.connect(
                    self._forward_frame_to_vlm, Qt.QueuedConnection
                )
                print("[MAIN WINDOW DEBUG] raw_frame_ready signal connected to VLM insights")

                # Also connect to analytics tab
                print("[MAIN WINDOW DEBUG] Connecting raw_frame_ready signal to analytics tab")
                self.video_file_controller.raw_frame_ready.connect(
                    self._forward_frame_to_analytics, Qt.QueuedConnection
                )
                print("[MAIN WINDOW DEBUG] raw_frame_ready signal connected to analytics tab")
            else:
                print("[MAIN WINDOW DEBUG] raw_frame_ready signal not found in video_file_controller")
            # Connect auto model/device selection signal
            self.video_detection_tab.auto_select_model_device.connect(self.video_file_controller.auto_select_model_device, Qt.QueuedConnection)
            # Connect video tab auto-select signals
            self.video_detection_only_tab.auto_select_model_device.connect(self.video_file_controller.auto_select_model_device, Qt.QueuedConnection)
            self.smart_intersection_tab.auto_select_model_device.connect(self.video_file_controller.auto_select_model_device, Qt.QueuedConnection)
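# Illustrative sketch (not part of this commit): the controller signals wired up above are not
# defined in this file. Inferred from the slots they are connected to, their declarations would
# look roughly like this; the exact payload types in the real controllers may differ.
from PySide6.QtCore import QObject, Signal

class VideoFileControllerSignalsSketch(QObject):
    frame_ready = Signal(object)                    # annotated frame for update_display()
    raw_frame_ready = Signal(object, list, float)   # frame, detections, fps -> _forward_frame_to_vlm()
    stats_ready = Signal(dict)                      # per-frame stats for update_stats()
    progress_ready = Signal(int, int, str)          # value, max_value, timestamp for update_progress()
    pause_state_changed = Signal(bool)              # forwarded to the VLM insights widget
    auto_select_model_device = Signal()             # also connected signal-to-signal from the tabs
    performance_stats_ready = Signal(dict)          # feeds update_performance_graphs()
    device_info_ready = Signal(dict)                # feeds the config panel device list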
            # Connect VLM insights analysis requests to a simple mock handler (since optimum is disabled)
            print("[MAIN WINDOW DEBUG] Checking for VLM insights widget...")
            if hasattr(self.config_panel, 'vlm_insights_widget'):
                print("[MAIN WINDOW DEBUG] VLM insights widget found, connecting signals...")
                self.config_panel.vlm_insights_widget.analyze_frame_requested.connect(self._handle_vlm_analysis, Qt.QueuedConnection)
                print("[MAIN WINDOW DEBUG] VLM insights analysis signal connected")

                # Connect pause state signal from video file controller to VLM insights
                if hasattr(self.video_file_controller, 'pause_state_changed'):
                    self.video_file_controller.pause_state_changed.connect(self.config_panel.vlm_insights_widget.on_video_paused, Qt.QueuedConnection)
                    print("[MAIN WINDOW DEBUG] VLM insights pause state signal connected")
                else:
                    print("[MAIN WINDOW DEBUG] pause_state_changed signal not found in video_file_controller")
            else:
                print("[MAIN WINDOW DEBUG] VLM insights widget NOT found in config panel")

            # Old VLM tab connections removed - functionality moved to insights widget
            # self.vlm_tab.process_image_requested.connect(self.vlm_controller.process_image, Qt.QueuedConnection)
            # self.video_controller.frame_np_ready.connect(self.vlm_tab.set_frame, Qt.QueuedConnection)
            # self.video_file_controller.frame_np_ready.connect(self.vlm_tab.set_frame, Qt.QueuedConnection)
        except Exception as e:
            QMessageBox.critical(
                self,
@@ -150,6 +301,7 @@ class MainWindow(QMainWindow):
                f"Error initializing controllers: {str(e)}"
            )
            print(f"Error details: {e}")
            traceback.print_exc()


    def connectSignals(self):
@@ -212,14 +364,46 @@
        self.export_tab.reload_config_btn.clicked.connect(self.load_config)
        self.export_tab.export_btn.clicked.connect(self.export_data)

        # Video Detection tab connections
        self.video_detection_tab.file_selected.connect(self._handle_video_file_selected)
        self.video_detection_tab.play_clicked.connect(self._handle_video_play)
        self.video_detection_tab.pause_clicked.connect(self._handle_video_pause)
        self.video_detection_tab.stop_clicked.connect(self._handle_video_stop)
        self.video_detection_tab.detection_toggled.connect(self._handle_video_detection_toggle)
        self.video_detection_tab.screenshot_clicked.connect(self._handle_video_screenshot)
        self.video_detection_tab.seek_changed.connect(self._handle_video_seek)
        # Video Detection tab connections (standard tab)
        self.video_detection_only_tab.file_selected.connect(self._handle_video_file_selected)
        self.video_detection_only_tab.play_clicked.connect(self._handle_video_play)
        self.video_detection_only_tab.pause_clicked.connect(self._handle_video_pause)
        self.video_detection_only_tab.stop_clicked.connect(self._handle_video_stop)
        self.video_detection_only_tab.detection_toggled.connect(self._handle_video_detection_toggle)
        self.video_detection_only_tab.screenshot_clicked.connect(self._handle_video_screenshot)
        self.video_detection_only_tab.seek_changed.connect(self._handle_video_seek)

        # Smart Intersection tab connections
        self.smart_intersection_tab.file_selected.connect(self._handle_video_file_selected)
        self.smart_intersection_tab.play_clicked.connect(self._handle_video_play)
        self.smart_intersection_tab.pause_clicked.connect(self._handle_video_pause)
        self.smart_intersection_tab.stop_clicked.connect(self._handle_video_stop)
        self.smart_intersection_tab.detection_toggled.connect(self._handle_video_detection_toggle)
        self.smart_intersection_tab.screenshot_clicked.connect(self._handle_video_screenshot)
        self.smart_intersection_tab.seek_changed.connect(self._handle_video_seek)

        # Smart Intersection specific connections
        self.smart_intersection_tab.smart_intersection_enabled.connect(self._handle_smart_intersection_enabled)
        self.smart_intersection_tab.multi_camera_mode_enabled.connect(self._handle_multi_camera_mode)
        self.smart_intersection_tab.roi_configuration_changed.connect(self._handle_roi_configuration_changed)
        self.smart_intersection_tab.scene_analytics_toggled.connect(self._handle_scene_analytics_toggle)

        # Connect smart intersection controller if available
        try:
            from controllers.smart_intersection_controller import SmartIntersectionController
            self.smart_intersection_controller = SmartIntersectionController()

            # Connect scene analytics signals
            self.video_file_controller.frame_np_ready.connect(
                self.smart_intersection_controller.process_frame, Qt.QueuedConnection
            )
            self.smart_intersection_controller.scene_analytics_ready.connect(
                self._handle_scene_analytics_update, Qt.QueuedConnection
            )
            print("✅ Smart Intersection Controller connected")
        except Exception as e:
            print(f"⚠️ Smart Intersection Controller not available: {e}")
            self.smart_intersection_controller = None

        # Connect OpenVINO device info signal to config panel from BOTH controllers
        self.video_controller.device_info_ready.connect(self.config_panel.update_devices_info, Qt.QueuedConnection)
@@ -227,7 +411,57 @@

        # After connecting video_file_controller and video_detection_tab, trigger auto model/device update
        QTimer.singleShot(0, self.video_file_controller.auto_select_model_device.emit)

        # Connect performance statistics from both controllers
        self.video_controller.performance_stats_ready.connect(self.update_performance_graphs)
        self.video_file_controller.performance_stats_ready.connect(self.update_performance_graphs)

        # Connect enhanced performance tab signals
        if hasattr(self, 'performance_tab'):
            try:
                # Connect performance tab signals for better integration
                self.performance_tab.spike_detected.connect(self.handle_performance_spike)
                self.performance_tab.device_switched.connect(self.handle_device_switch_notification)
                self.performance_tab.performance_data_updated.connect(self.handle_performance_data_update)
                print("✅ Performance tab signals connected successfully")
            except Exception as e:
                print(f"⚠️ Could not connect performance tab signals: {e}")

    @Slot(dict)
    def handle_performance_spike(self, spike_data):
        """Handle performance spike detection"""
        try:
            latency = spike_data.get('latency', 0)
            device = spike_data.get('device', 'Unknown')
            print(f"🚨 Performance spike detected: {latency:.1f}ms on {device}")

            # Optionally show notification or log to analytics
            if hasattr(self, 'analytics_tab'):
                # Could add spike to analytics if needed
                pass

        except Exception as e:
            print(f"❌ Error handling performance spike: {e}")
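# Illustrative sketch (not part of this commit): the "optionally show notification" branch above
# could surface spikes without a modal dialog by reusing the status bar. The helper name and the
# threshold value are assumptions for illustration only.
def notify_performance_spike(window, spike_data, threshold_ms=100.0):
    """Show a transient status-bar warning when inference latency spikes."""
    latency = float(spike_data.get('latency', 0))
    device = spike_data.get('device', 'Unknown')
    if latency >= threshold_ms:
        # 5-second transient message; avoids interrupting the operator with a dialog
        window.statusBar().showMessage(f"Latency spike: {latency:.1f} ms on {device}", 5000)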
    @Slot(str)
    def handle_device_switch_notification(self, device):
        """Handle device switch notification"""
        try:
            print(f"🔄 Device switched to: {device}")
            # Could update UI elements or show notification
        except Exception as e:
            print(f"❌ Error handling device switch notification: {e}")

    @Slot(dict)
    def handle_performance_data_update(self, performance_data):
        """Handle performance data updates for other components"""
        try:
            # Could forward to other tabs or components that need performance data
            if hasattr(self, 'analytics_tab'):
                # Forward performance data to analytics if needed
                pass
        except Exception as e:
            print(f"❌ Error handling performance data update: {e}")
    def setupMenus(self):
        """Set up application menus"""
        # File menu
@@ -284,16 +518,46 @@
        if not config:
            return

        # Update config
        for section in config:
            if section in self.config:
                self.config[section].update(config[section])
            else:
                self.config[section] = config[section]
        # Convert flat config to nested structure for model manager
        nested_config = {
            "detection": {}
        }

        # Update model manager
        # Map config panel values to model manager format
        if 'device' in config:
            nested_config["detection"]["device"] = config['device']
        if 'model' in config:
            # Convert YOLOv11x format to yolo11x format for model manager
            model_name = config['model'].lower()
            if 'yolov11' in model_name:
                model_name = model_name.replace('yolov11', 'yolo11')
            elif model_name == 'auto':
                model_name = 'auto'
            nested_config["detection"]["model"] = model_name
        if 'confidence_threshold' in config:
            nested_config["detection"]["confidence_threshold"] = config['confidence_threshold']
        if 'iou_threshold' in config:
            nested_config["detection"]["iou_threshold"] = config['iou_threshold']

        print(f"🔧 Main Window: Applying config to model manager: {nested_config}")
        print(f"🔧 Main Window: Received config from panel: {config}")

        # Update config
        for section in nested_config:
            if section in self.config:
                self.config[section].update(nested_config[section])
            else:
                self.config[section] = nested_config[section]

        # Update model manager with nested config
        if self.model_manager:
            self.model_manager.update_config(self.config)
            self.model_manager.update_config(nested_config)

        # Refresh model information in video controllers
        if hasattr(self, 'video_controller') and self.video_controller:
            self.video_controller.refresh_model_info()
        if hasattr(self, 'video_file_controller') and self.video_file_controller:
            self.video_file_controller.refresh_model_info()

        # Save config to file
        save_configuration(self.config, self.config_file)
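# Illustrative sketch (not part of this commit): the inline "YOLOv11x" -> "yolo11x" conversion above
# could live in a small helper so the UI label format and the model manager's naming stay in one
# place. The helper name is an assumption; the 'auto' passthrough simply mirrors the logic shown.
def normalize_model_name(ui_label: str) -> str:
    """Map a config-panel model label (e.g. 'YOLOv11x') to the model manager's key (e.g. 'yolo11x')."""
    name = ui_label.lower()
    if name == 'auto':
        return 'auto'  # let the model manager pick a model
    return name.replace('yolov11', 'yolo11')

# Example: normalize_model_name('YOLOv11x') -> 'yolo11x'; normalize_model_name('Auto') -> 'auto'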
@@ -302,7 +566,9 @@
        self.export_tab.update_config_display(self.config)

        # Update status
        self.statusBar().showMessage("Configuration applied", 2000)
        device = config.get('device', 'Unknown')
        model = config.get('model', 'Unknown')
        self.statusBar().showMessage(f"Configuration applied - Device: {device}, Model: {model}", 3000)

    @Slot()
    def load_config(self):
@@ -642,6 +908,7 @@ class MainWindow(QMainWindow):
            confidence_str = f" (Confidence: {confidence:.2f})" if confidence > 0 else ""
        else:
            traffic_light_color = traffic_light_info
            confidence = 1.0
            confidence_str = ""

        if traffic_light_color != 'unknown':
@@ -653,6 +920,16 @@
            else:
                color_text = str(traffic_light_color).upper()
            self.statusBar().showMessage(f"Traffic Light: {color_text}{confidence_str}")

            # Publish traffic light status to InfluxDB
            if hasattr(self, 'data_publisher') and self.data_publisher:
                try:
                    color_for_publishing = traffic_light_color
                    if isinstance(traffic_light_color, dict):
                        color_for_publishing = traffic_light_color.get("color", "unknown")
                    self.data_publisher.publish_traffic_light_status(color_for_publishing, confidence)
                except Exception as e:
                    print(f"❌ Error publishing traffic light status: {e}")
    @Slot(dict)
    def handle_violation_detected(self, violation):
        """Handle a detected traffic violation"""
@@ -663,9 +940,28 @@
            # Add to violations tab
            self.violations_tab.add_violation(violation)

            # Update analytics tab with violation data
            if hasattr(self.analytics_tab, 'update_violation_data'):
                self.analytics_tab.update_violation_data(violation)
                print(f"[ANALYTICS DEBUG] Violation data forwarded to analytics tab")

            # Update analytics
            if self.analytics_controller:
                self.analytics_controller.register_violation(violation)

            # Publish violation to InfluxDB
            if hasattr(self, 'data_publisher') and self.data_publisher:
                try:
                    violation_type = violation.get('type', 'red_light_violation')
                    vehicle_id = violation.get('track_id', 'unknown')
                    details = {
                        'timestamp': violation.get('timestamp', ''),
                        'confidence': violation.get('confidence', 1.0),
                        'location': violation.get('location', 'crosswalk')
                    }
                    self.data_publisher.publish_violation_event(violation_type, vehicle_id, details)
                except Exception as e:
                    print(f"❌ Error publishing violation event: {e}")

            print(f"🚨 Violation processed: {violation['id']} at {violation['timestamp']}")
        except Exception as e:
@@ -678,10 +974,29 @@
        self.video_file_controller.set_source(file_path)
    def _handle_video_play(self):
        print("[VideoDetection] Play clicked")
        self.video_file_controller.play()
        # Check if video is paused, if so resume, otherwise start
        if hasattr(self.video_file_controller, '_paused') and self.video_file_controller._paused:
            self.video_file_controller.resume()
        else:
            self.video_file_controller.play()
        # Notify VLM insights that video is playing (not paused)
        print("[MAIN WINDOW DEBUG] Notifying VLM insights: video playing")
        if hasattr(self, 'config_panel') and hasattr(self.config_panel, 'vlm_insights_widget'):
            self.config_panel.vlm_insights_widget.on_video_paused(False)
            print("[MAIN WINDOW DEBUG] VLM insights notified: not paused")
        else:
            print("[MAIN WINDOW DEBUG] VLM insights not found for play notification")

    def _handle_video_pause(self):
        print("[VideoDetection] Pause clicked")
        self.video_file_controller.pause()
        # Notify VLM insights that video is paused
        print("[MAIN WINDOW DEBUG] Notifying VLM insights: video paused")
        if hasattr(self, 'config_panel') and hasattr(self.config_panel, 'vlm_insights_widget'):
            self.config_panel.vlm_insights_widget.on_video_paused(True)
            print("[MAIN WINDOW DEBUG] VLM insights notified: paused")
        else:
            print("[MAIN WINDOW DEBUG] VLM insights not found for pause notification")
    def _handle_video_stop(self):
        print("[VideoDetection] Stop clicked")
        self.video_file_controller.stop()
@@ -727,24 +1042,368 @@
            self.statusBar().showMessage(f"Error switching device: {e}", 3000)
    @Slot(dict)
    def update_performance_graphs(self, stats):
        """Update the performance graphs using the new robust widget logic."""
        """Update the performance graphs using the enhanced widget logic."""
        if not hasattr(self, 'performance_tab'):
            return
        print(f"[PERF DEBUG] update_performance_graphs called with: {stats}")

        # Publish performance data to InfluxDB
        if hasattr(self, 'data_publisher') and self.data_publisher:
            try:
                fps = stats.get('fps', 0)
                inference_time = stats.get('inference_time', 0)
                cpu_usage = stats.get('cpu_usage', None)
                gpu_usage = stats.get('gpu_usage', None)

                self.data_publisher.publish_performance_data(fps, inference_time, cpu_usage, gpu_usage)

                # Publish device info periodically (every 10th frame)
                if hasattr(self, '_device_info_counter'):
                    self._device_info_counter += 1
                else:
                    self._device_info_counter = 1

                if self._device_info_counter % 10 == 0:
                    self.data_publisher.publish_device_info()
            except Exception as e:
                print(f"❌ Error publishing performance data: {e}")

        # Enhanced analytics data with proper structure
        current_time = time.time()
        analytics_data = {
            'real_time_data': {
                'timestamps': [stats.get('frame_idx', 0)],
                'timestamps': [current_time],
                'inference_latency': [stats.get('inference_time', 0)],
                'fps': [stats.get('fps', 0)],
                'device_usage': [1 if stats.get('device', 'CPU') == 'GPU' else 0],
                'resolution_width': [int(stats.get('resolution', '640x360').split('x')[0]) if 'x' in stats.get('resolution', '') else 640],
                'resolution_height': [int(stats.get('resolution', '640x360').split('x')[1]) if 'x' in stats.get('resolution', '') else 360],
                'device_switches': [0] if stats.get('is_device_switch', False) else [],
                'resolution_changes': [0] if stats.get('is_res_change', False) else [],
            },
            'latency_statistics': {},
            'current_metrics': {},
            'system_metrics': {},
            'latency_statistics': {
                'avg': stats.get('avg_inference_time', 0),
                'max': stats.get('max_inference_time', 0),
                'min': stats.get('min_inference_time', 0),
                'spike_count': stats.get('spike_count', 0)
            },
            'current_metrics': {
                'device': stats.get('device', 'CPU'),
                'resolution': stats.get('resolution', 'Unknown'),
                'model': stats.get('model_name', stats.get('model', 'Unknown')), # Try model_name first, then model
                'fps': stats.get('fps', 0),
                'inference_time': stats.get('inference_time', 0)
            },
            'system_metrics': {
                'cpu_usage': stats.get('cpu_usage', 0),
                'gpu_usage': stats.get('gpu_usage', 0),
                'memory_usage': stats.get('memory_usage', 0)
            }
        }
        print(f"[PERF DEBUG] analytics_data for update_performance_data: {analytics_data}")

        print(f"[PERF DEBUG] Enhanced analytics_data: {analytics_data}")

        # Update performance graphs with enhanced data
        self.performance_tab.update_performance_data(analytics_data)
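# Illustrative sketch (not part of this commit): the inline resolution parsing in real_time_data
# above repeats the same split('x') expression twice. A small helper keeps the fallback in one
# place; the function name is an assumption, and the (640, 360) default mirrors the code shown.
def parse_resolution(resolution, default=(640, 360)):
    """Parse a 'WIDTHxHEIGHT' string, falling back to `default` on anything malformed."""
    try:
        width_str, height_str = str(resolution).lower().split('x', 1)
        return int(width_str), int(height_str)
    except ValueError:
        return default

# Example: parse_resolution('1920x1080') -> (1920, 1080); parse_resolution('') -> (640, 360)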
    def _handle_vlm_analysis(self, frame, prompt):
        """Handle VLM analysis requests."""
        print(f"[MAIN WINDOW DEBUG] _handle_vlm_analysis called")
        print(f"[MAIN WINDOW DEBUG] Frame type: {type(frame)}, shape: {frame.shape if hasattr(frame, 'shape') else 'N/A'}")
        print(f"[MAIN WINDOW DEBUG] Prompt: '{prompt}'")

        try:
            # Check if VLM controller is available
            if hasattr(self, 'vlm_controller') and self.vlm_controller:
                print(f"[MAIN WINDOW DEBUG] Using VLM controller for analysis")

                # Connect VLM result to insights widget if not already connected
                if not hasattr(self, '_vlm_connected'):
                    print(f"[MAIN WINDOW DEBUG] Connecting VLM controller results to insights widget")
                    self.vlm_controller.result_ready.connect(
                        lambda result: self._handle_vlm_result(result),
                        Qt.QueuedConnection
                    )
                    self._vlm_connected = True

                # Process image with VLM controller
                self.vlm_controller.process_image(frame, prompt)
                print(f"[MAIN WINDOW DEBUG] VLM controller processing started")

            else:
                print(f"[MAIN WINDOW DEBUG] VLM controller not available, using mock analysis")
                # Fallback to mock analysis
                import cv2
                import numpy as np
                result = self._generate_mock_analysis(frame, prompt)
                print(f"[MAIN WINDOW DEBUG] Mock analysis generated: {len(result)} characters")

                # Send result back to VLM insights widget
                if hasattr(self.config_panel, 'vlm_insights_widget'):
                    print(f"[MAIN WINDOW DEBUG] Sending mock result to VLM insights widget")
                    self.config_panel.vlm_insights_widget.on_analysis_result(result)
                    print(f"[MAIN WINDOW DEBUG] Mock result sent successfully")
                else:
                    print(f"[MAIN WINDOW DEBUG] VLM insights widget not found")

        except Exception as e:
            print(f"[VLM ERROR] Error in analysis: {e}")
            if hasattr(self.config_panel, 'vlm_insights_widget'):
                self.config_panel.vlm_insights_widget.on_analysis_result(f"Analysis error: {str(e)}")

    def _handle_vlm_result(self, result):
        """Handle VLM controller results."""
        print(f"[MAIN WINDOW DEBUG] _handle_vlm_result called")
        print(f"[MAIN WINDOW DEBUG] Result type: {type(result)}")

        try:
            # Extract answer from result dict
            if isinstance(result, dict):
                if 'response' in result:
                    answer = result['response']
                    print(f"[MAIN WINDOW DEBUG] Extracted response: {len(str(answer))} characters")
                elif 'answer' in result:
                    answer = result['answer']
                    print(f"[MAIN WINDOW DEBUG] Extracted answer: {len(str(answer))} characters")
                else:
                    answer = str(result)
                    print(f"[MAIN WINDOW DEBUG] Using result as string: {len(answer)} characters")
            else:
                answer = str(result)
                print(f"[MAIN WINDOW DEBUG] Using result as string: {len(answer)} characters")

            # Send result to VLM insights widget
            if hasattr(self.config_panel, 'vlm_insights_widget'):
                print(f"[MAIN WINDOW DEBUG] Sending VLM result to insights widget")
                self.config_panel.vlm_insights_widget.on_analysis_result(answer)
                print(f"[MAIN WINDOW DEBUG] VLM result sent successfully")
            else:
                print(f"[MAIN WINDOW DEBUG] VLM insights widget not found")

        except Exception as e:
            print(f"[VLM ERROR] Error handling VLM result: {e}")
    def _forward_frame_to_vlm(self, frame, detections, fps):
        """Forward frame to VLM insights widget."""
        print(f"[MAIN WINDOW DEBUG] _forward_frame_to_vlm called")
        print(f"[MAIN WINDOW DEBUG] Frame type: {type(frame)}, shape: {frame.shape if hasattr(frame, 'shape') else 'N/A'}")
        print(f"[MAIN WINDOW DEBUG] Detections count: {len(detections) if detections else 0}")
        print(f"[MAIN WINDOW DEBUG] FPS: {fps}")

        # Publish detection events to InfluxDB
        if hasattr(self, 'data_publisher') and self.data_publisher and detections:
            try:
                # Count vehicles and pedestrians
                vehicle_count = 0
                pedestrian_count = 0

                for detection in detections:
                    label = ""
                    if isinstance(detection, dict):
                        label = detection.get('label', '').lower()
                    elif hasattr(detection, 'label'):
                        label = getattr(detection, 'label', '').lower()
                    elif hasattr(detection, 'class_name'):
                        label = getattr(detection, 'class_name', '').lower()
                    elif hasattr(detection, 'cls'):
                        label = str(getattr(detection, 'cls', '')).lower()

                    # Debug the label detection
                    if label and label != 'traffic light':
                        print(f"[PUBLISHER DEBUG] Detected object: {label}")

                    if label in ['car', 'truck', 'bus', 'motorcycle', 'vehicle']:
                        vehicle_count += 1
                    elif label in ['person', 'pedestrian']:
                        pedestrian_count += 1

                # Also try to get vehicle count from tracked vehicles if available
                if vehicle_count == 0 and hasattr(self, 'video_file_controller'):
                    try:
                        # Try to get vehicle count from current analysis data
                        analysis_data = getattr(self.video_file_controller, 'get_current_analysis_data', lambda: {})()
                        if isinstance(analysis_data, dict):
                            tracked_vehicles = analysis_data.get('tracked_vehicles', [])
                            if tracked_vehicles:
                                vehicle_count = len(tracked_vehicles)
                                print(f"[PUBLISHER DEBUG] Using tracked vehicle count: {vehicle_count}")
                    except Exception:
                        pass

                self.data_publisher.publish_detection_events(vehicle_count, pedestrian_count)
            except Exception as e:
                print(f"❌ Error publishing detection events: {e}")

        try:
            if hasattr(self.config_panel, 'vlm_insights_widget'):
                print(f"[MAIN WINDOW DEBUG] Forwarding frame to VLM insights widget")
                self.config_panel.vlm_insights_widget.set_current_frame(frame)

                # Store detection data for VLM analysis
                if hasattr(self.config_panel.vlm_insights_widget, 'set_detection_data'):
                    print(f"[MAIN WINDOW DEBUG] Setting detection data for VLM")
                    detection_data = {
                        'detections': detections,
                        'fps': fps,
                        'timestamp': time.time()
                    }
                    # Get additional data from video controller if available
                    if hasattr(self.video_file_controller, 'get_current_analysis_data'):
                        analysis_data = self.video_file_controller.get_current_analysis_data()
                        detection_data.update(analysis_data)

                    self.config_panel.vlm_insights_widget.set_detection_data(detection_data)
                    print(f"[MAIN WINDOW DEBUG] Detection data set successfully")

                print(f"[MAIN WINDOW DEBUG] Frame forwarded successfully")
            else:
                print(f"[MAIN WINDOW DEBUG] VLM insights widget not found for frame forwarding")
        except Exception as e:
            print(f"[MAIN WINDOW DEBUG] Error forwarding frame to VLM: {e}")
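# Illustrative sketch (not part of this commit): the isinstance/hasattr ladder in the counting loop
# above can be collapsed into one helper so that VLM forwarding and analytics forwarding classify
# detections the same way. The helper name and the attribute order are assumptions that simply
# mirror the branches shown.
def extract_detection_label(detection):
    """Return a lower-case class label from a dict- or attribute-style detection ('' if unknown)."""
    if isinstance(detection, dict):
        return str(detection.get('label', '')).lower()
    for attr in ('label', 'class_name', 'cls'):
        value = getattr(detection, attr, None)
        if value is not None:
            return str(value).lower()
    return ''

# Example usage:
#   vehicles = sum(1 for d in detections
#                  if extract_detection_label(d) in ('car', 'truck', 'bus', 'motorcycle', 'vehicle'))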
    def _forward_frame_to_analytics(self, frame, detections, fps):
        """Forward frame data to analytics tab for real-time updates."""
        try:
            print(f"[ANALYTICS DEBUG] Forwarding frame data to analytics tab")
            print(f"[ANALYTICS DEBUG] Detections count: {len(detections) if detections else 0}")

            # Prepare detection data for analytics
            detection_data = {
                'detections': detections,
                'fps': fps,
                'timestamp': time.time(),
                'frame_shape': frame.shape if hasattr(frame, 'shape') else None
            }

            # Get additional analysis data from video controller
            if hasattr(self.video_file_controller, 'get_current_analysis_data'):
                analysis_data = self.video_file_controller.get_current_analysis_data()
                if analysis_data:
                    detection_data.update(analysis_data)
                    print(f"[ANALYTICS DEBUG] Updated with analysis data: {list(analysis_data.keys())}")

            # Forward to analytics tab
            if hasattr(self.analytics_tab, 'update_detection_data'):
                self.analytics_tab.update_detection_data(detection_data)
                print(f"[ANALYTICS DEBUG] Detection data forwarded to analytics tab successfully")
            else:
                print(f"[ANALYTICS DEBUG] Analytics tab update_detection_data method not found")

        except Exception as e:
            print(f"[ANALYTICS DEBUG] Error forwarding frame to analytics: {e}")
            import traceback
            traceback.print_exc()

    def _generate_mock_analysis(self, frame, prompt):
        """Generate a mock analysis response based on frame content and prompt."""
        try:
            import cv2
            import numpy as np

            # Analyze frame properties
            h, w = frame.shape[:2] if frame is not None else (0, 0)

            # Basic image analysis
            analysis_parts = []

            if "traffic" in prompt.lower():
                analysis_parts.append("🚦 Traffic Analysis:")
                analysis_parts.append(f"• Frame resolution: {w}x{h}")
                analysis_parts.append("• Detected scene: Urban traffic intersection")
                analysis_parts.append("• Visible elements: Road, potential vehicles")
                analysis_parts.append("• Traffic flow appears to be moderate")

            elif "safety" in prompt.lower():
                analysis_parts.append("⚠️ Safety Assessment:")
                analysis_parts.append("• Monitoring for traffic violations")
                analysis_parts.append("• Checking lane discipline")
                analysis_parts.append("• Observing traffic light compliance")
                analysis_parts.append("• Overall safety level: Monitoring required")

            else:
                analysis_parts.append("🔍 General Analysis:")
                analysis_parts.append(f"• Image dimensions: {w}x{h} pixels")
                analysis_parts.append("• Scene type: Traffic monitoring view")
                analysis_parts.append("• Quality: Processing frame for analysis")
                analysis_parts.append(f"• Prompt: {prompt[:100]}...")

            # Add timestamp and disclaimer
            from datetime import datetime
            timestamp = datetime.now().strftime("%H:%M:%S")
            analysis_parts.append(f"\n📝 Analysis completed at {timestamp}")
            analysis_parts.append("ℹ️ Note: This is a mock analysis. Full AI analysis requires compatible OpenVINO setup.")

            return "\n".join(analysis_parts)

        except Exception as e:
            return f"Unable to analyze frame: {str(e)}"

    # Smart Intersection Signal Handlers
    @Slot(bool)
    def _handle_smart_intersection_enabled(self, enabled):
        """Handle smart intersection mode toggle"""
        print(f"🚦 Smart Intersection mode {'enabled' if enabled else 'disabled'}")

        if self.smart_intersection_controller:
            self.smart_intersection_controller.set_enabled(enabled)

        # Update status
        if enabled:
            self.statusBar().showMessage("Smart Intersection mode activated")
        else:
            self.statusBar().showMessage("Standard detection mode")

    @Slot(bool)
    def _handle_multi_camera_mode(self, enabled):
        """Handle multi-camera mode toggle"""
        print(f"📹 Multi-camera mode {'enabled' if enabled else 'disabled'}")

        if self.smart_intersection_controller:
            self.smart_intersection_controller.set_multi_camera_mode(enabled)

    @Slot(dict)
    def _handle_roi_configuration_changed(self, roi_config):
        """Handle ROI configuration changes"""
        print(f"🎯 ROI configuration updated: {len(roi_config.get('rois', []))} regions")

        if self.smart_intersection_controller:
            self.smart_intersection_controller.update_roi_config(roi_config)

    @Slot(bool)
    def _handle_scene_analytics_toggle(self, enabled):
        """Handle scene analytics toggle"""
        print(f"📊 Scene analytics {'enabled' if enabled else 'disabled'}")

        if self.smart_intersection_controller:
            self.smart_intersection_controller.set_scene_analytics(enabled)

    @Slot(dict)
    def _handle_scene_analytics_update(self, analytics_data):
        """Handle scene analytics data updates"""
        try:
            # Update video detection tab with smart intersection data
            smart_stats = {
                'total_objects': analytics_data.get('total_objects', 0),
                'active_tracks': analytics_data.get('active_tracks', 0),
                'roi_events': analytics_data.get('roi_events', 0),
                'crosswalk_events': analytics_data.get('crosswalk_events', 0),
                'lane_events': analytics_data.get('lane_events', 0),
                'safety_events': analytics_data.get('safety_events', 0),
                'north_objects': analytics_data.get('camera_stats', {}).get('north', 0),
                'east_objects': analytics_data.get('camera_stats', {}).get('east', 0),
                'south_objects': analytics_data.get('camera_stats', {}).get('south', 0),
                'west_objects': analytics_data.get('camera_stats', {}).get('west', 0),
                'fps': analytics_data.get('fps', 0),
                'processing_time': analytics_data.get('processing_time_ms', 0),
                'gpu_usage': analytics_data.get('gpu_usage', 0),
                'memory_usage': analytics_data.get('memory_usage', 0)
            }

            # Update both video tabs with stats
            self.video_detection_only_tab.update_stats(smart_stats)
            self.smart_intersection_tab.update_stats(smart_stats)

            # Update analytics tab if it has smart intersection support
            if hasattr(self.analytics_tab, 'update_smart_intersection_analytics'):
                self.analytics_tab.update_smart_intersection_analytics(analytics_data)

        except Exception as e:
            print(f"Error handling scene analytics update: {e}")