diff --git a/detection_openvino.py b/detection_openvino.py
index b62e000..81b3289 100644
--- a/detection_openvino.py
+++ b/detection_openvino.py
@@ -7,7 +7,6 @@ import cv2
import numpy as np
from pathlib import Path
from typing import List, Dict, Tuple, Optional
-from red_light_violation_pipeline import RedLightViolationPipeline
# --- Install required packages if missing ---
try:
@@ -50,7 +49,10 @@ COCO_CLASSES = {
}
# Traffic-related classes we're interested in (using standard COCO indices)
-TRAFFIC_CLASS_NAMES = COCO_CLASSES
+TRAFFIC_CLASS_NAMES = [
+ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck',
+ 'traffic light', 'stop sign', 'parking meter'
+]
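+
+# Minimal illustrative sketch (not used by the code below): when detections are keyed by
+# the raw COCO class_id rather than by position in the list above, a dict indexed by the
+# standard COCO ids keeps the lookup unambiguous. The name TRAFFIC_CLASS_ID_TO_NAME is
+# hypothetical.
+TRAFFIC_CLASS_ID_TO_NAME = {
+    0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 5: 'bus', 7: 'truck',
+    9: 'traffic light', 11: 'stop sign', 12: 'parking meter'
+}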
# --- Model Conversion and Quantization ---
def convert_yolo_to_openvino(model_name: str = "yolo11x", half: bool = True) -> Path:
@@ -76,266 +78,6 @@ def quantize_openvino_model(ov_xml: Path, model_name: str = "yolo11x") -> Path:
print("Quantization requires a calibration dataset. Skipping actual quantization in this demo.")
return ov_xml # Return FP32 if no quantization
-# --- OpenVINO Inference Pipeline ---
-class OpenVINOYOLODetector:
- def __init__(self, model_xml: Path, device: str = "AUTO"):
- self.core = ov.Core()
- self.device = device
- self.model = self.core.read_model(model_xml)
- self.input_shape = self.model.inputs[0].shape
- self.input_height = self.input_shape[2]
- self.input_width = self.input_shape[3]
- self.ov_config = {}
- if device != "CPU":
- self.model.reshape({0: [1, 3, 640, 640]})
- if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices):
- self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"}
- self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config)
- self.output_layer = self.compiled_model.output(0)
-
- def preprocess(self, frame: np.ndarray) -> np.ndarray:
- img = cv2.resize(frame, (self.input_width, self.input_height))
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- img = img.astype(np.float32) / 255.0
- img = img.transpose(2, 0, 1)[None]
- return img
-
- def infer(self, frame: np.ndarray, conf_threshold: float = 0.25) -> List[Dict]:
- input_tensor = self.preprocess(frame)
- output = self.compiled_model([input_tensor])[self.output_layer]
- return self.postprocess(output, frame.shape, conf_threshold)
-
- def postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]:
- # Output: (1, 84, 8400) or (84, 8400) or (8400, 84)
- if output.ndim == 3:
- output = np.squeeze(output)
- if output.shape[0] == 84:
- output = output.T # (8400, 84)
- boxes = output[:, :4]
- scores = output[:, 4:]
- class_ids = np.argmax(scores, axis=1)
- confidences = np.max(scores, axis=1)
- detections = []
- h, w = frame_shape[:2]
- for i, (box, score, class_id) in enumerate(zip(boxes, confidences, class_ids)):
- if score < conf_threshold:
- continue
- x_c, y_c, bw, bh = box
- # If normalized, scale to input size
- if all(0.0 <= v <= 1.0 for v in box):
- x_c *= self.input_width
- y_c *= self.input_height
- bw *= self.input_width
- bh *= self.input_height
- # Scale to original frame size
- scale_x = w / self.input_width
- scale_y = h / self.input_height
- x_c *= scale_x
- y_c *= scale_y
- bw *= scale_x
- bh *= scale_y
- x1 = int(round(x_c - bw / 2))
- y1 = int(round(y_c - bh / 2))
- x2 = int(round(x_c + bw / 2))
- y2 = int(round(y_c + bh / 2))
- x1 = max(0, min(x1, w - 1))
- y1 = max(0, min(y1, h - 1))
- x2 = max(0, min(x2, w - 1))
- y2 = max(0, min(y2, h - 1))
- if x2 <= x1 or y2 <= y1:
- continue
- # Only keep class 9 as traffic light, rename if found
- if class_id == 9:
- class_name = "traffic light"
- elif class_id < len(TRAFFIC_CLASS_NAMES):
- class_name = TRAFFIC_CLASS_NAMES[class_id]
- else:
- continue # Remove unknown/other classes
- detections.append({
- 'bbox': [x1, y1, x2, y2],
- 'confidence': float(score),
- 'class_id': int(class_id),
- 'class_name': class_name
- })
- return detections
-
- def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray:
- # 80+ visually distinct colors for COCO classes (BGR)
- COCO_COLORS = [
- (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49),
- (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187),
- (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236),
- (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199),
- (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), (207, 255, 49),
- (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187),
- (44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236),
- (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199),
- (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49),
- (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187),
- (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236),
- (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49),
- (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187),
- (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236),
- (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49),
- (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187),
- (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236),
- (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49)
- ]
- for det in detections:
- x1, y1, x2, y2 = det['bbox']
- label = f"{det['class_name']} {det['confidence']:.2f}"
- color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)]
- cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness)
- cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
- return frame
-
-# --- Video/Image/Live Inference ---
-def run_inference(detector: OpenVINOYOLODetector, source=0, conf_threshold=0.25, flip=False, use_popup=False, video_width=None):
- if isinstance(source, str) and not os.path.exists(source):
- print(f"Downloading sample video: {source}")
- import requests
- url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4"
- r = requests.get(url)
- with open(source, 'wb') as f:
- f.write(r.content)
- cap = cv2.VideoCapture(source)
- if not cap.isOpened():
- print(f"Failed to open video source: {source}")
- return
- window_name = "YOLOv11x + OpenVINO Detection"
- if use_popup:
- cv2.namedWindow(window_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)
- frame_count = 0
- times = []
- while True:
- ret, frame = cap.read()
- if not ret:
- break
- if flip:
- frame = cv2.flip(frame, 1)
- if video_width:
- scale = video_width / max(frame.shape[:2])
- frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
- start = time.time()
- detections = detector.infer(frame, conf_threshold=conf_threshold)
- frame = detector.draw(frame, detections)
- elapsed = time.time() - start
- times.append(elapsed)
- if len(times) > 200:
- times.pop(0)
- fps = 1.0 / np.mean(times) if times else 0
- cv2.putText(frame, f"FPS: {fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
- if use_popup:
- cv2.imshow(window_name, frame)
- if cv2.waitKey(1) & 0xFF == 27:
- break
- else:
- cv2.imshow(window_name, frame)
- if cv2.waitKey(1) & 0xFF == 27:
- break
- frame_count += 1
- cap.release()
- cv2.destroyAllWindows()
-
-# --- Main Entrypoint ---
-if __name__ == "__main__":
- # Choose model: yolo11x or yolo11n, etc.
- MODEL_NAME = "yolo11x"
- DEVICE = "AUTO" # or "CPU", "GPU"
- # Step 1: Convert model if needed
- ov_xml = convert_yolo_to_openvino(MODEL_NAME)
- # Step 2: Quantize (optional, demo skips actual quantization)
- ov_xml = quantize_openvino_model(ov_xml, MODEL_NAME)
- # Step 3: Create detector
- detector = OpenVINOYOLODetector(ov_xml, device=DEVICE)
- # Step 4: Run on webcam, video, or image
- # Webcam: source=0, Video: source="video.mp4", Image: source="image.jpg"
- run_inference(detector, source=0, conf_threshold=0.25, flip=True, use_popup=True, video_width=1280)
-# To run on a video file: run_inference(detector, source="people.mp4", conf_threshold=0.25)
-# To run on an image: run_inference(detector, source="image.jpg", conf_threshold=0.25)
-# To run async or batch, extend the OpenVINOYOLODetector class with async API as needed.
-
-import numpy as np
-import cv2
-
-def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, input_shape=(640, 640), original_shape=None):
- """
- output: OpenVINO raw output tensor (e.g., shape [1, 25200, 85])
- conf_threshold: minimum confidence
- iou_threshold: for NMS
- input_shape: model input size (w, h)
- original_shape: original image size (w, h)
- """
- # 1. Squeeze batch dimension
- output = np.squeeze(output) # [25200, 85]
-
- # 2. Split predictions
- boxes = output[:, :4]
- obj_conf = output[:, 4]
- class_scores = output[:, 5:]
-
- # 3. Get class with highest score
- class_ids = np.argmax(class_scores, axis=1)
- class_conf = class_scores[np.arange(len(class_scores)), class_ids]
-
- # 4. Multiply objectness confidence with class confidence
- scores = obj_conf * class_conf
-
- # 5. Filter by confidence threshold
- mask = scores > conf_threshold
- boxes = boxes[mask]
- scores = scores[mask]
- class_ids = class_ids[mask]
-
- if original_shape is not None:
- # Rescale boxes from input_shape to original image shape
- input_w, input_h = input_shape
- orig_w, orig_h = original_shape
- scale_x = orig_w / input_w
- scale_y = orig_h / input_h
-
- boxes[:, 0] *= scale_x # x1
- boxes[:, 1] *= scale_y # y1
- boxes[:, 2] *= scale_x # x2
- boxes[:, 3] *= scale_y # y2
-
- # 6. Convert boxes to [x, y, w, h] format for OpenCV NMS
- boxes_xywh = []
- for box in boxes:
- x1, y1, x2, y2 = box
- boxes_xywh.append([x1, y1, x2 - x1, y2 - y1])
-
- # 7. Apply NMS
- indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold)
-
- # 8. Return filtered boxes
- result_boxes = []
- result_scores = []
- result_classes = []
- if len(boxes) > 0 and len(scores) > 0:
- indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold)
- if len(indices) > 0:
- indices = np.array(indices).flatten()
- for i in indices:
- i = int(i)
- result_boxes.append(boxes[i])
- result_scores.append(scores[i])
- result_classes.append(class_ids[i])
- return result_boxes, result_scores, result_classes
-
-import os
-import time
-import numpy as np
-import cv2
-from pathlib import Path
-from typing import List, Dict, Optional
-
-# Only traffic-related classes for detection
-TRAFFIC_CLASS_NAMES = [
- 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck',
- 'traffic light', 'stop sign', 'parking meter'
-]
-
class OpenVINOVehicleDetector:
def __init__(self, model_path: str = None, device: str = "AUTO", use_quantized: bool = False, enable_ocr: bool = False, confidence_threshold: float = 0.4):
import openvino as ov
@@ -354,8 +96,11 @@ class OpenVINOVehicleDetector:
self._inference_times = []
self._start_time = time.time()
self._frame_count = 0
+
# Model selection logic
self.model_path = self._find_best_model(model_path, use_quantized)
+ print(f"🎯 OpenVINOVehicleDetector: Using model: {self.model_path}")
+
self.core = ov.Core()
self.model = self.core.read_model(self.model_path)
# Always reshape to static shape before accessing .shape
@@ -374,10 +119,47 @@ class OpenVINOVehicleDetector:
self.output_layer = self.compiled_model.output(0)
def _find_best_model(self, model_path, use_quantized):
+ # If a specific model path is provided, use it directly
+ if model_path and Path(model_path).exists():
+ print(f"🎯 Using provided model path: {model_path}")
+ return str(model_path)
+
+ # If no model path provided, extract model name from path or default to yolo11x
+ model_name = "yolo11x" # Default fallback
+ if model_path:
+ # Try to extract the model variant name from the provided path
+ path_str = str(model_path).lower()
+ for variant in ("yolo11n", "yolo11s", "yolo11m", "yolo11l", "yolo11x"):
+ if variant in path_str:
+ model_name = variant
+ break
+
+ print(f"🔍 Searching for {model_name} model files...")
+
# Priority: quantized IR > IR > .pt
search_paths = [
- Path(model_path) if model_path else None,
- Path("yolo11x_openvino_int8_model/yolo11x.xml") if use_quantized else None,
+ Path(f"{model_name}_openvino_int8_model/{model_name}.xml") if use_quantized else None,
+ Path(f"{model_name}_openvino_model/{model_name}.xml"),
+ Path(f"rcb/{model_name}_openvino_model/{model_name}.xml"),
+ Path(f"{model_name}.xml"),
+ Path(f"rcb/{model_name}.xml"),
+ Path(f"{model_name}.pt"),
+ Path(f"rcb/{model_name}.pt")
+ ]
+
+ for p in search_paths:
+ if p and p.exists():
+ print(f"✅ Found model: {p}")
+ return str(p)
+
+ # Fallback to any yolo11x if specific model not found
+ fallback_paths = [
Path("yolo11x_openvino_model/yolo11x.xml"),
Path("rcb/yolo11x_openvino_model/yolo11x.xml"),
Path("yolo11x.xml"),
@@ -385,10 +167,13 @@ class OpenVINOVehicleDetector:
Path("yolo11x.pt"),
Path("rcb/yolo11x.pt")
]
- for p in search_paths:
+
+ for p in fallback_paths:
if p and p.exists():
+ print(f"⚠️ Using fallback model: {p}")
return str(p)
- raise FileNotFoundError("No suitable YOLOv11x model found for OpenVINO.")
+
+ raise FileNotFoundError(f"No suitable {model_name} model found for OpenVINO.")
def detect_vehicles(self, frame: np.ndarray, conf_threshold: float = None) -> List[Dict]:
if conf_threshold is None:
@@ -397,9 +182,9 @@ class OpenVINOVehicleDetector:
input_tensor = self._preprocess(frame)
output = self.compiled_model([input_tensor])[self.output_layer]
# Debug: print raw output shape
- print(f"[DEBUG] Model output shape: {output.shape}")
+ # print(f"[DEBUG] Model output shape: {output.shape}")
detections = self._postprocess(output, frame.shape, conf_threshold)
- print(f"[DEBUG] Detections after postprocess: {len(detections)}")
+ # print(f"[DEBUG] Detections after postprocess: {len(detections)}")
elapsed = time.time() - start
self._inference_times.append(elapsed)
self._frame_count += 1
@@ -471,7 +256,7 @@ class OpenVINOVehicleDetector:
'class_id': int(class_id),
'class_name': class_name
})
- print(f"[DEBUG] Raw detections before NMS: {len(detections)}")
+ # print(f"[DEBUG] Raw detections before NMS: {len(detections)}")
# Apply NMS
if len(detections) > 0:
boxes = np.array([det['bbox'] for det in detections])
@@ -484,7 +269,7 @@ class OpenVINOVehicleDetector:
else:
indices = []
detections = [detections[int(i)] for i in indices] if len(indices) > 0 else []
- print(f"[DEBUG] Detections after NMS: {len(detections)}")
+ # print(f"[DEBUG] Detections after NMS: {len(detections)}")
return detections
def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray:
@@ -516,661 +301,12 @@ class OpenVINOVehicleDetector:
cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness)
cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
return frame
+
+ def get_device(self):
+ """Get the device being used for inference"""
+ return self.device
-# --- Video/Image/Live Inference ---
-def run_inference(detector: OpenVINOYOLODetector, source=0, conf_threshold=0.25, flip=False, use_popup=False, video_width=None):
- if isinstance(source, str) and not os.path.exists(source):
- print(f"Downloading sample video: {source}")
- import requests
- url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4"
- r = requests.get(url)
- with open(source, 'wb') as f:
- f.write(r.content)
- cap = cv2.VideoCapture(source)
- if not cap.isOpened():
- print(f"Failed to open video source: {source}")
- return
- window_name = "YOLOv11x + OpenVINO Detection"
- if use_popup:
- cv2.namedWindow(window_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)
- frame_count = 0
- times = []
- while True:
- ret, frame = cap.read()
- if not ret:
- break
- if flip:
- frame = cv2.flip(frame, 1)
- if video_width:
- scale = video_width / max(frame.shape[:2])
- frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
- start = time.time()
- detections = detector.infer(frame, conf_threshold=conf_threshold)
- frame = detector.draw(frame, detections)
- elapsed = time.time() - start
- times.append(elapsed)
- if len(times) > 200:
- times.pop(0)
- fps = 1.0 / np.mean(times) if times else 0
- cv2.putText(frame, f"FPS: {fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
- if use_popup:
- cv2.imshow(window_name, frame)
- if cv2.waitKey(1) & 0xFF == 27:
- break
- else:
- cv2.imshow(window_name, frame)
- if cv2.waitKey(1) & 0xFF == 27:
- break
- frame_count += 1
- cap.release()
- cv2.destroyAllWindows()
-
-# --- Main Entrypoint ---
if __name__ == "__main__":
- # Choose model: yolo11x or yolo11n, etc.
- MODEL_NAME = "yolo11x"
-
- DEVICE = "AUTO" # or "CPU", "GPU"
- # Step 1: Convert model if needed
- ov_xml = convert_yolo_to_openvino(MODEL_NAME)
- # Step 2: Quantize (optional, demo skips actual quantization)
- ov_xml = quantize_openvino_model(ov_xml, MODEL_NAME)
- # Step 3: Create detector
- detector = OpenVINOYOLODetector(ov_xml, device=DEVICE)
- # Step 4: Run on webcam, video, or image
- # Webcam: source=0, Video: source="video.mp4", Image: source="image.jpg"
- run_inference(detector, source=0, conf_threshold=0.25, flip=True, use_popup=True, video_width=1280)
-# To run on a video file: run_inference(detector, source="people.mp4", conf_threshold=0.25)
-# To run on an image: run_inference(detector, source="image.jpg", conf_threshold=0.25)
-# To run async or batch, extend the OpenVINOYOLODetector class with async API as needed.
-
-import numpy as np
-import cv2
-
-def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, input_shape=(640, 640), original_shape=None):
- """
- output: OpenVINO raw output tensor (e.g., shape [1, 25200, 85])
- conf_threshold: minimum confidence
- iou_threshold: for NMS
- input_shape: model input size (w, h)
- original_shape: original image size (w, h)
- """
- # 1. Squeeze batch dimension
- output = np.squeeze(output) # [25200, 85]
-
- # 2. Split predictions
- boxes = output[:, :4]
- obj_conf = output[:, 4]
- class_scores = output[:, 5:]
-
- # 3. Get class with highest score
- class_ids = np.argmax(class_scores, axis=1)
- class_conf = class_scores[np.arange(len(class_scores)), class_ids]
-
- # 4. Multiply objectness confidence with class confidence
- scores = obj_conf * class_conf
-
- # 5. Filter by confidence threshold
- mask = scores > conf_threshold
- boxes = boxes[mask]
- scores = scores[mask]
- class_ids = class_ids[mask]
-
- if original_shape is not None:
- # Rescale boxes from input_shape to original image shape
- input_w, input_h = input_shape
- orig_w, orig_h = original_shape
- scale_x = orig_w / input_w
- scale_y = orig_h / input_h
-
- boxes[:, 0] *= scale_x # x1
- boxes[:, 1] *= scale_y # y1
- boxes[:, 2] *= scale_x # x2
- boxes[:, 3] *= scale_y # y2
-
- # 6. Convert boxes to [x, y, w, h] format for OpenCV NMS
- boxes_xywh = []
- for box in boxes:
- x1, y1, x2, y2 = box
- boxes_xywh.append([x1, y1, x2 - x1, y2 - y1])
-
- # 7. Apply NMS
- indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold)
-
- # 8. Return filtered boxes
- result_boxes = []
- result_scores = []
- result_classes = []
- if len(boxes) > 0 and len(scores) > 0:
- indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold)
- if len(indices) > 0:
- indices = np.array(indices).flatten()
- for i in indices:
- i = int(i)
- result_boxes.append(boxes[i])
- result_scores.append(scores[i])
- result_classes.append(class_ids[i])
- return result_boxes, result_scores, result_classes
-
-import os
-import time
-import numpy as np
-import cv2
-from pathlib import Path
-from typing import List, Dict, Optional
-
-# Only traffic-related classes for detection
-TRAFFIC_CLASS_NAMES = [
- 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck',
- 'traffic light', 'stop sign', 'parking meter'
-]
-
-class OpenVINOVehicleDetector:
- def __init__(self, model_path: str = None, device: str = "AUTO", use_quantized: bool = False, enable_ocr: bool = False, confidence_threshold: float = 0.4):
- import openvino as ov
- self.device = device
- self.confidence_threshold = confidence_threshold
- self.ocr_reader = None
- self.class_names = TRAFFIC_CLASS_NAMES
- self.performance_stats = {
- 'fps': 0,
- 'avg_inference_time': 0,
- 'frames_processed': 0,
- 'backend': f"OpenVINO-{device}",
- 'total_detections': 0,
- 'detection_rate': 0
- }
- self._inference_times = []
- self._start_time = time.time()
- self._frame_count = 0
- # Model selection logic
- self.model_path = self._find_best_model(model_path, use_quantized)
- self.core = ov.Core()
- self.model = self.core.read_model(self.model_path)
- # Always reshape to static shape before accessing .shape
- self.model.reshape({0: [1, 3, 640, 640]})
- self.input_shape = self.model.inputs[0].shape
- self.input_height = self.input_shape[2]
- self.input_width = self.input_shape[3]
- self.ov_config = {}
- if device != "CPU":
- # Already reshaped above, so nothing more needed here
- pass
- if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices):
- self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"}
- self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config)
-
- self.output_layer = self.compiled_model.output(0)
-
- def _find_best_model(self, model_path, use_quantized):
- # Priority: quantized IR > IR > .pt
- search_paths = [
- Path(model_path) if model_path else None,
- Path("yolo11x_openvino_int8_model/yolo11x.xml") if use_quantized else None,
- Path("yolo11x_openvino_model/yolo11x.xml"),
- Path("rcb/yolo11x_openvino_model/yolo11x.xml"),
- Path("yolo11x.xml"),
- Path("rcb/yolo11x.xml"),
- Path("yolo11x.pt"),
- Path("rcb/yolo11x.pt")
- ]
- for p in search_paths:
- if p and p.exists():
- return str(p)
- raise FileNotFoundError("No suitable YOLOv11x model found for OpenVINO.")
-
- def detect_vehicles(self, frame: np.ndarray, conf_threshold: float = None) -> List[Dict]:
- if conf_threshold is None:
- conf_threshold = 0.1 # Lowered for debugging
- start = time.time()
- input_tensor = self._preprocess(frame)
- output = self.compiled_model([input_tensor])[self.output_layer]
- # Debug: print raw output shape
- print(f"[DEBUG] Model output shape: {output.shape}")
- detections = self._postprocess(output, frame.shape, conf_threshold)
- print(f"[DEBUG] Detections after postprocess: {len(detections)}")
- elapsed = time.time() - start
- self._inference_times.append(elapsed)
- self._frame_count += 1
- self.performance_stats['frames_processed'] = self._frame_count
- self.performance_stats['total_detections'] += len(detections)
- if len(self._inference_times) > 100:
- self._inference_times.pop(0)
- self.performance_stats['avg_inference_time'] = float(np.mean(self._inference_times)) if self._inference_times else 0
- total_time = time.time() - self._start_time
- self.performance_stats['fps'] = self._frame_count / total_time if total_time > 0 else 0
- return detections
-
- def _preprocess(self, frame: np.ndarray) -> np.ndarray:
- img = cv2.resize(frame, (self.input_width, self.input_height))
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- img = img.astype(np.float32) / 255.0
- img = img.transpose(2, 0, 1)[None]
- return img
-
- def _postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]:
- # Output: (1, 84, 8400) or (84, 8400) or (8400, 84)
- if output.ndim == 3:
- output = np.squeeze(output)
- if output.shape[0] == 84:
- output = output.T # (8400, 84)
- boxes = output[:, :4]
- scores = output[:, 4:]
- class_ids = np.argmax(scores, axis=1)
- confidences = np.max(scores, axis=1)
- detections = []
- h, w = frame_shape[:2]
- for i, (box, score, class_id) in enumerate(zip(boxes, confidences, class_ids)):
- if score < conf_threshold:
- continue
- x_c, y_c, bw, bh = box
- # If normalized, scale to input size
- if all(0.0 <= v <= 1.0 for v in box):
- x_c *= self.input_width
- y_c *= self.input_height
- bw *= self.input_width
- bh *= self.input_height
- # Scale to original frame size
- scale_x = w / self.input_width
- scale_y = h / self.input_height
- x_c *= scale_x
- y_c *= scale_y
- bw *= scale_x
- bh *= scale_y
- x1 = int(round(x_c - bw / 2))
- y1 = int(round(y_c - bh / 2))
- x2 = int(round(x_c + bw / 2))
- y2 = int(round(y_c + bh / 2))
- x1 = max(0, min(x1, w - 1))
- y1 = max(0, min(y1, h - 1))
- x2 = max(0, min(x2, w - 1))
- y2 = max(0, min(y2, h - 1))
- if x2 <= x1 or y2 <= y1:
- continue
- # Only keep class 9 as traffic light, rename if found
- if class_id == 9:
- class_name = "traffic light"
- elif class_id < len(TRAFFIC_CLASS_NAMES):
- class_name = TRAFFIC_CLASS_NAMES[class_id]
- else:
- continue # Remove unknown/other classes
- detections.append({
- 'bbox': [x1, y1, x2, y2],
- 'confidence': float(score),
- 'class_id': int(class_id),
- 'class_name': class_name
- })
- print(f"[DEBUG] Raw detections before NMS: {len(detections)}")
- # Apply NMS
- if len(detections) > 0:
- boxes = np.array([det['bbox'] for det in detections])
- scores = np.array([det['confidence'] for det in detections])
- indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), conf_threshold, 0.5)
- if isinstance(indices, (list, tuple)) and len(indices) > 0:
- indices = np.array(indices).flatten()
- elif isinstance(indices, np.ndarray) and indices.size > 0:
- indices = indices.flatten()
- else:
- indices = []
- detections = [detections[int(i)] for i in indices] if len(indices) > 0 else []
- print(f"[DEBUG] Detections after NMS: {len(detections)}")
- return detections
-
- def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray:
- # 80+ visually distinct colors for COCO classes (BGR)
- COCO_COLORS = [
- (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49),
- (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187),
- (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236),
- (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199),
- (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), (207, 255, 49),
- (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187),
- (44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236),
- (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199),
- (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49),
- (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187),
- (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236),
- (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49),
- (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187),
- (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236),
- (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49),
- (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187),
- (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236),
- (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49)
- ]
- for det in detections:
- x1, y1, x2, y2 = det['bbox']
- label = f"{det['class_name']} {det['confidence']:.2f}"
- color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)]
- cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness)
- cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
- return frame
-
-# --- Video/Image/Live Inference ---
-def run_inference(detector: OpenVINOYOLODetector, source=0, conf_threshold=0.25, flip=False, use_popup=False, video_width=None):
- if isinstance(source, str) and not os.path.exists(source):
- print(f"Downloading sample video: {source}")
- import requests
- url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4"
- r = requests.get(url)
- with open(source, 'wb') as f:
- f.write(r.content)
- cap = cv2.VideoCapture(source)
- if not cap.isOpened():
- print(f"Failed to open video source: {source}")
- return
- window_name = "YOLOv11x + OpenVINO Detection"
- if use_popup:
- cv2.namedWindow(window_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)
- frame_count = 0
- times = []
- while True:
- ret, frame = cap.read()
- if not ret:
- break
- if flip:
- frame = cv2.flip(frame, 1)
- if video_width:
- scale = video_width / max(frame.shape[:2])
- frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
- start = time.time()
- detections = detector.infer(frame, conf_threshold=conf_threshold)
- frame = detector.draw(frame, detections)
- elapsed = time.time() - start
- times.append(elapsed)
- if len(times) > 200:
- times.pop(0)
- fps = 1.0 / np.mean(times) if times else 0
- cv2.putText(frame, f"FPS: {fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
- if use_popup:
- cv2.imshow(window_name, frame)
- if cv2.waitKey(1) & 0xFF == 27:
- break
- else:
- cv2.imshow(window_name, frame)
- if cv2.waitKey(1) & 0xFF == 27:
- break
- frame_count += 1
- cap.release()
- cv2.destroyAllWindows()
-
-# --- Main Entrypoint ---
-if __name__ == "__main__":
- # Choose model: yolo11x or yolo11n, etc.
- MODEL_NAME = "yolo11x"
-
- DEVICE = "AUTO" # or "CPU", "GPU"
- # Step 1: Convert model if needed
- ov_xml = convert_yolo_to_openvino(MODEL_NAME)
- # Step 2: Quantize (optional, demo skips actual quantization)
- ov_xml = quantize_openvino_model(ov_xml, MODEL_NAME)
- # Step 3: Create detector
- detector = OpenVINOYOLODetector(ov_xml, device=DEVICE)
- # Step 4: Run on webcam, video, or image
- # Webcam: source=0, Video: source="video.mp4", Image: source="image.jpg"
- run_inference(detector, source=0, conf_threshold=0.25, flip=True, use_popup=True, video_width=1280)
-# To run on a video file: run_inference(detector, source="people.mp4", conf_threshold=0.25)
-# To run on an image: run_inference(detector, source="image.jpg", conf_threshold=0.25)
-# To run async or batch, extend the OpenVINOYOLODetector class with async API as needed.
-
-import numpy as np
-import cv2
-
-def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, input_shape=(640, 640), original_shape=None):
- """
- output: OpenVINO raw output tensor (e.g., shape [1, 25200, 85])
- conf_threshold: minimum confidence
- iou_threshold: for NMS
- input_shape: model input size (w, h)
- original_shape: original image size (w, h)
- """
- # 1. Squeeze batch dimension
- output = np.squeeze(output) # [25200, 85]
-
- # 2. Split predictions
- boxes = output[:, :4]
- obj_conf = output[:, 4]
- class_scores = output[:, 5:]
-
- # 3. Get class with highest score
- class_ids = np.argmax(class_scores, axis=1)
- class_conf = class_scores[np.arange(len(class_scores)), class_ids]
-
- # 4. Multiply objectness confidence with class confidence
- scores = obj_conf * class_conf
-
- # 5. Filter by confidence threshold
- mask = scores > conf_threshold
- boxes = boxes[mask]
- scores = scores[mask]
- class_ids = class_ids[mask]
-
- if original_shape is not None:
- # Rescale boxes from input_shape to original image shape
- input_w, input_h = input_shape
- orig_w, orig_h = original_shape
- scale_x = orig_w / input_w
- scale_y = orig_h / input_h
-
- boxes[:, 0] *= scale_x # x1
- boxes[:, 1] *= scale_y # y1
- boxes[:, 2] *= scale_x # x2
- boxes[:, 3] *= scale_y # y2
-
- # 6. Convert boxes to [x, y, w, h] format for OpenCV NMS
- boxes_xywh = []
- for box in boxes:
- x1, y1, x2, y2 = box
- boxes_xywh.append([x1, y1, x2 - x1, y2 - y1])
-
- # 7. Apply NMS
- indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold)
-
- # 8. Return filtered boxes
- result_boxes = []
- result_scores = []
- result_classes = []
- if len(boxes) > 0 and len(scores) > 0:
- indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold)
- if len(indices) > 0:
- indices = np.array(indices).flatten()
- for i in indices:
- i = int(i)
- result_boxes.append(boxes[i])
- result_scores.append(scores[i])
- result_classes.append(class_ids[i])
- return result_boxes, result_scores, result_classes
-
-import os
-import time
-import numpy as np
-import cv2
-from pathlib import Path
-from typing import List, Dict, Optional
-
-# Only traffic-related classes for detection
-TRAFFIC_CLASS_NAMES = [
- 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck',
- 'traffic light', 'stop sign', 'parking meter'
-]
-
-class OpenVINOVehicleDetector:
- def __init__(self, model_path: str = None, device: str = "AUTO", use_quantized: bool = False, enable_ocr: bool = False, confidence_threshold: float = 0.4):
- import openvino as ov
- self.device = device
- self.confidence_threshold = confidence_threshold
- self.ocr_reader = None
- self.class_names = TRAFFIC_CLASS_NAMES
- self.performance_stats = {
- 'fps': 0,
- 'avg_inference_time': 0,
- 'frames_processed': 0,
- 'backend': f"OpenVINO-{device}",
- 'total_detections': 0,
- 'detection_rate': 0
- }
- self._inference_times = []
- self._start_time = time.time()
- self._frame_count = 0
- # Model selection logic
- self.model_path = self._find_best_model(model_path, use_quantized)
- self.core = ov.Core()
- self.model = self.core.read_model(self.model_path)
- # Always reshape to static shape before accessing .shape
- self.model.reshape({0: [1, 3, 640, 640]})
- self.input_shape = self.model.inputs[0].shape
- self.input_height = self.input_shape[2]
- self.input_width = self.input_shape[3]
- self.ov_config = {}
- if device != "CPU":
- # Already reshaped above, so nothing more needed here
- pass
- if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices):
- self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"}
- self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config)
-
- self.output_layer = self.compiled_model.output(0)
-
- def _find_best_model(self, model_path, use_quantized):
- # Priority: quantized IR > IR > .pt
- search_paths = [
- Path(model_path) if model_path else None,
- Path("yolo11x_openvino_int8_model/yolo11x.xml") if use_quantized else None,
- Path("yolo11x_openvino_model/yolo11x.xml"),
- Path("rcb/yolo11x_openvino_model/yolo11x.xml"),
- Path("yolo11x.xml"),
- Path("rcb/yolo11x.xml"),
- Path("yolo11x.pt"),
- Path("rcb/yolo11x.pt")
- ]
- for p in search_paths:
- if p and p.exists():
- return str(p)
- raise FileNotFoundError("No suitable YOLOv11x model found for OpenVINO.")
-
- def detect_vehicles(self, frame: np.ndarray, conf_threshold: float = None) -> List[Dict]:
- if conf_threshold is None:
- conf_threshold = 0.1 # Lowered for debugging
- start = time.time()
- input_tensor = self._preprocess(frame)
- output = self.compiled_model([input_tensor])[self.output_layer]
- # Debug: print raw output shape
- print(f"[DEBUG] Model output shape: {output.shape}")
- detections = self._postprocess(output, frame.shape, conf_threshold)
- print(f"[DEBUG] Detections after postprocess: {len(detections)}")
- elapsed = time.time() - start
- self._inference_times.append(elapsed)
- self._frame_count += 1
- self.performance_stats['frames_processed'] = self._frame_count
- self.performance_stats['total_detections'] += len(detections)
- if len(self._inference_times) > 100:
- self._inference_times.pop(0)
- self.performance_stats['avg_inference_time'] = float(np.mean(self._inference_times)) if self._inference_times else 0
- total_time = time.time() - self._start_time
- self.performance_stats['fps'] = self._frame_count / total_time if total_time > 0 else 0
- return detections
-
- def _preprocess(self, frame: np.ndarray) -> np.ndarray:
- img = cv2.resize(frame, (self.input_width, self.input_height))
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- img = img.astype(np.float32) / 255.0
- img = img.transpose(2, 0, 1)[None]
- return img
-
- def _postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]:
- # Output: (1, 84, 8400) or (84, 8400) or (8400, 84)
- if output.ndim == 3:
- output = np.squeeze(output)
- if output.shape[0] == 84:
- output = output.T # (8400, 84)
- boxes = output[:, :4]
- scores = output[:, 4:]
- class_ids = np.argmax(scores, axis=1)
- confidences = np.max(scores, axis=1)
- detections = []
- h, w = frame_shape[:2]
- for i, (box, score, class_id) in enumerate(zip(boxes, confidences, class_ids)):
- if score < conf_threshold:
- continue
- x_c, y_c, bw, bh = box
- # If normalized, scale to input size
- if all(0.0 <= v <= 1.0 for v in box):
- x_c *= self.input_width
- y_c *= self.input_height
- bw *= self.input_width
- bh *= self.input_height
- # Scale to original frame size
- scale_x = w / self.input_width
- scale_y = h / self.input_height
- x_c *= scale_x
- y_c *= scale_y
- bw *= scale_x
- bh *= scale_y
- x1 = int(round(x_c - bw / 2))
- y1 = int(round(y_c - bh / 2))
- x2 = int(round(x_c + bw / 2))
- y2 = int(round(y_c + bh / 2))
- x1 = max(0, min(x1, w - 1))
- y1 = max(0, min(y1, h - 1))
- x2 = max(0, min(x2, w - 1))
- y2 = max(0, min(y2, h - 1))
- if x2 <= x1 or y2 <= y1:
- continue
- # Only keep class 9 as traffic light, rename if found
- if class_id == 9:
- class_name = "traffic light"
- elif class_id < len(TRAFFIC_CLASS_NAMES):
- class_name = TRAFFIC_CLASS_NAMES[class_id]
- else:
- continue # Remove unknown/other classes
- detections.append({
- 'bbox': [x1, y1, x2, y2],
- 'confidence': float(score),
- 'class_id': int(class_id),
- 'class_name': class_name
- })
- print(f"[DEBUG] Raw detections before NMS: {len(detections)}")
- # Apply NMS
- if len(detections) > 0:
- boxes = np.array([det['bbox'] for det in detections])
- scores = np.array([det['confidence'] for det in detections])
- indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), conf_threshold, 0.5)
- if isinstance(indices, (list, tuple)) and len(indices) > 0:
- indices = np.array(indices).flatten()
- elif isinstance(indices, np.ndarray) and indices.size > 0:
- indices = indices.flatten()
- else:
- indices = []
- detections = [detections[int(i)] for i in indices] if len(indices) > 0 else []
- print(f"[DEBUG] Detections after NMS: {len(detections)}")
- return detections
-
- def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray:
- # 80+ visually distinct colors for COCO classes (BGR)
- COCO_COLORS = [
- (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49),
- (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187),
- (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236),
- (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199),
- (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), (207, 255, 49),
- (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187),
- (44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236),
- (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199),
- (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49),
- (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187),
- (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236),
- (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49),
- (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187),
- (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236),
- (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49),
- (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187),
- (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236),
- (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49)
- ]
- for det in detections:
- x1, y1, x2, y2 = det['bbox']
- label = f"{det['class_name']} {det['confidence']:.2f}"
- color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)]
- cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness)
- cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
- return frame
\ No newline at end of file
+ # Test the detector with YOLOv11n model
+ detector = OpenVINOVehicleDetector(model_path="yolo11n_openvino_model/yolo11n.xml")
+ print(f"Detector initialized with model: {detector.model_path}")
diff --git a/detection_openvino_async.py b/detection_openvino_async.py
index 6ea61ad..3c99641 100644
--- a/detection_openvino_async.py
+++ b/detection_openvino_async.py
@@ -1691,4 +1691,1189 @@ def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, inp
result_boxes.append(boxes[i])
result_scores.append(scores[i])
result_classes.append(class_ids[i])
- return result_boxes, result_scores, result_classes
\ No newline at end of file
+ return result_boxes, result_scores, result_classes
+
+
+# Detection logic using OpenVINO models (YOLO, etc.)
+
+import os
+import sys
+import time
+import cv2
+import numpy as np
+from pathlib import Path
+from typing import List, Dict, Tuple, Optional
+from red_light_violation_pipeline import RedLightViolationPipeline
+
+# --- Install required packages if missing ---
+try:
+ import openvino as ov
+except ImportError:
+ print("Installing openvino...")
+ os.system('pip install --quiet "openvino>=2024.0.0"')
+ import openvino as ov
+try:
+ from ultralytics import YOLO
+except ImportError:
+ print("Installing ultralytics...")
+ os.system('pip install --quiet "ultralytics==8.3.0"')
+ from ultralytics import YOLO
+try:
+ import nncf
+except ImportError:
+ print("Installing nncf...")
+ os.system('pip install --quiet "nncf>=2.9.0"')
+ import nncf
+
+# --- COCO dataset class names ---
+COCO_CLASSES = {
+ 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus',
+ 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant',
+ 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat',
+ 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear',
+ 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag',
+ 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard',
+ 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove',
+ 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle',
+ 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl',
+ 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli',
+ 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair',
+ 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet',
+ 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard',
+ 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink',
+ 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors',
+ 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'
+}
+
+# Traffic-related classes we're interested in (using standard COCO indices)
+TRAFFIC_CLASS_NAMES = COCO_CLASSES
+
+# --- Model Conversion and Quantization ---
+def convert_yolo_to_openvino(model_name: str = "yolo11x", half: bool = True) -> Path:
+ """Convert YOLOv11x PyTorch model to OpenVINO IR format."""
+ pt_path = Path(f"{model_name}.pt")
+ ov_dir = Path(f"{model_name}_openvino_model")
+ ov_xml = ov_dir / f"{model_name}.xml"
+ if not ov_xml.exists():
+ print(f"Exporting {pt_path} to OpenVINO IR...")
+ model = YOLO(str(pt_path))
+ model.export(format="openvino", dynamic=True, half=half)
+ else:
+ print(f"OpenVINO IR already exists: {ov_xml}")
+ return ov_xml
+
+def quantize_openvino_model(ov_xml: Path, model_name: str = "yolo11x") -> Path:
+ """Quantize OpenVINO IR model to INT8 using NNCF."""
+ int8_dir = Path(f"{model_name}_openvino_int8_model")
+ int8_xml = int8_dir / f"{model_name}.xml"
+ if int8_xml.exists():
+ print(f"INT8 model already exists: {int8_xml}")
+ return int8_xml
+ print("Quantization requires a calibration dataset. Skipping actual quantization in this demo.")
+ return ov_xml # Return FP32 if no quantization
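+
+# A minimal sketch of what real INT8 post-training quantization could look like once a
+# calibration set is available. The helper name, the in-memory image list, and the
+# 300-sample subset size are illustrative assumptions, not part of this demo.
+def quantize_with_calibration(ov_xml: Path, calibration_images: List[np.ndarray],
+                              model_name: str = "yolo11x") -> Path:
+    """Quantize an OpenVINO IR model to INT8 with NNCF from in-memory BGR frames."""
+    core = ov.Core()
+    model = core.read_model(ov_xml)
+
+    def transform(img: np.ndarray) -> np.ndarray:
+        # Mirror the detector preprocessing: resize, BGR->RGB, scale to [0, 1], NCHW
+        img = cv2.resize(img, (640, 640))
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
+        return img.transpose(2, 0, 1)[None]
+
+    calib_dataset = nncf.Dataset(calibration_images, transform)
+    quantized = nncf.quantize(model, calib_dataset,
+                              subset_size=min(300, len(calibration_images)))
+    int8_dir = Path(f"{model_name}_openvino_int8_model")
+    int8_dir.mkdir(exist_ok=True)
+    int8_xml = int8_dir / f"{model_name}.xml"
+    ov.save_model(quantized, str(int8_xml))
+    return int8_xml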
+
+# --- OpenVINO Inference Pipeline ---
+class OpenVINOYOLODetector:
+ def __init__(self, model_xml: Path, device: str = "AUTO"):
+ self.core = ov.Core()
+ self.device = device
+ self.model = self.core.read_model(model_xml)
+ self.input_shape = self.model.inputs[0].shape
+ self.input_height = self.input_shape[2]
+ self.input_width = self.input_shape[3]
+ self.ov_config = {}
+ if device != "CPU":
+ self.model.reshape({0: [1, 3, 640, 640]})
+ if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices):
+ self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"}
+ self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config)
+ self.output_layer = self.compiled_model.output(0)
+
+ def preprocess(self, frame: np.ndarray) -> np.ndarray:
+ img = cv2.resize(frame, (self.input_width, self.input_height))
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ img = img.astype(np.float32) / 255.0
+ img = img.transpose(2, 0, 1)[None]
+ return img
+
+ def infer(self, frame: np.ndarray, conf_threshold: float = 0.25) -> List[Dict]:
+ input_tensor = self.preprocess(frame)
+ output = self.compiled_model([input_tensor])[self.output_layer]
+ return self.postprocess(output, frame.shape, conf_threshold)
+
+ def postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]:
+ # Output: (1, 84, 8400) or (84, 8400) or (8400, 84)
+ if output.ndim == 3:
+ output = np.squeeze(output)
+ if output.shape[0] == 84:
+ output = output.T # (8400, 84)
+ boxes = output[:, :4]
+ scores = output[:, 4:]
+ class_ids = np.argmax(scores, axis=1)
+ confidences = np.max(scores, axis=1)
+ detections = []
+ h, w = frame_shape[:2]
+ for i, (box, score, class_id) in enumerate(zip(boxes, confidences, class_ids)):
+ if score < conf_threshold:
+ continue
+ x_c, y_c, bw, bh = box
+ # If normalized, scale to input size
+ if all(0.0 <= v <= 1.0 for v in box):
+ x_c *= self.input_width
+ y_c *= self.input_height
+ bw *= self.input_width
+ bh *= self.input_height
+ # Scale to original frame size
+ scale_x = w / self.input_width
+ scale_y = h / self.input_height
+ x_c *= scale_x
+ y_c *= scale_y
+ bw *= scale_x
+ bh *= scale_y
+ x1 = int(round(x_c - bw / 2))
+ y1 = int(round(y_c - bh / 2))
+ x2 = int(round(x_c + bw / 2))
+ y2 = int(round(y_c + bh / 2))
+ x1 = max(0, min(x1, w - 1))
+ y1 = max(0, min(y1, h - 1))
+ x2 = max(0, min(x2, w - 1))
+ y2 = max(0, min(y2, h - 1))
+ if x2 <= x1 or y2 <= y1:
+ continue
+ # Only keep class 9 as traffic light, rename if found
+ if class_id == 9:
+ class_name = "traffic light"
+ elif class_id < len(TRAFFIC_CLASS_NAMES):
+ class_name = TRAFFIC_CLASS_NAMES[class_id]
+ else:
+ continue # Remove unknown/other classes
+ detections.append({
+ 'bbox': [x1, y1, x2, y2],
+ 'confidence': float(score),
+ 'class_id': int(class_id),
+ 'class_name': class_name
+ })
+ return detections
+
+ def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray:
+ # 80+ visually distinct colors for COCO classes (BGR)
+ COCO_COLORS = [
+ (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49),
+ (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187),
+ (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236),
+ (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199),
+ (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), (207, 255, 49),
+ (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187),
+ (44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236),
+ (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199),
+ (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49),
+ (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187),
+ (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236),
+ (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49),
+ (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187),
+ (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236),
+ (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49),
+ (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187),
+ (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236),
+ (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49)
+ ]
+ for det in detections:
+ x1, y1, x2, y2 = det['bbox']
+ label = f"{det['class_name']} {det['confidence']:.2f}"
+ color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)]
+ cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness)
+ cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
+ return frame
+
+# --- Video/Image/Live Inference ---
+def run_inference(detector: OpenVINOYOLODetector, source=0, conf_threshold=0.25, flip=False, use_popup=False, video_width=None):
+ if isinstance(source, str) and not os.path.exists(source):
+ print(f"Downloading sample video: {source}")
+ import requests
+ url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4"
+ r = requests.get(url)
+ with open(source, 'wb') as f:
+ f.write(r.content)
+ cap = cv2.VideoCapture(source)
+ if not cap.isOpened():
+ print(f"Failed to open video source: {source}")
+ return
+ window_name = "YOLOv11x + OpenVINO Detection"
+ if use_popup:
+ cv2.namedWindow(window_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)
+ frame_count = 0
+ times = []
+ while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+ if flip:
+ frame = cv2.flip(frame, 1)
+ if video_width:
+ scale = video_width / max(frame.shape[:2])
+ frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
+ start = time.time()
+ detections = detector.infer(frame, conf_threshold=conf_threshold)
+ frame = detector.draw(frame, detections)
+ elapsed = time.time() - start
+ times.append(elapsed)
+ if len(times) > 200:
+ times.pop(0)
+ fps = 1.0 / np.mean(times) if times else 0
+ cv2.putText(frame, f"FPS: {fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
+ if use_popup:
+ cv2.imshow(window_name, frame)
+ if cv2.waitKey(1) & 0xFF == 27:
+ break
+ else:
+ cv2.imshow(window_name, frame)
+ if cv2.waitKey(1) & 0xFF == 27:
+ break
+ frame_count += 1
+ cap.release()
+ cv2.destroyAllWindows()
+
+# --- Main Entrypoint ---
+if __name__ == "__main__":
+ # Choose model: yolo11x or yolo11n, etc.
+ MODEL_NAME = "yolo11x"
+ DEVICE = "AUTO" # or "CPU", "GPU"
+ # Step 1: Convert model if needed
+ ov_xml = convert_yolo_to_openvino(MODEL_NAME)
+ # Step 2: Quantize (optional, demo skips actual quantization)
+ ov_xml = quantize_openvino_model(ov_xml, MODEL_NAME)
+ # Step 3: Create detector
+ detector = OpenVINOYOLODetector(ov_xml, device=DEVICE)
+ # Step 4: Run on webcam, video, or image
+ # Webcam: source=0, Video: source="video.mp4", Image: source="image.jpg"
+ run_inference(detector, source=0, conf_threshold=0.25, flip=True, use_popup=True, video_width=1280)
+# To run on a video file: run_inference(detector, source="people.mp4", conf_threshold=0.25)
+# To run on an image: run_inference(detector, source="image.jpg", conf_threshold=0.25)
+# To run async or batch, extend the OpenVINOYOLODetector class with async API as needed.
+
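+# A minimal sketch of one way to do that with OpenVINO's AsyncInferQueue: a few requests
+# stay in flight while the next frames are preprocessed. The helper name async_infer_frames,
+# the jobs count, and the fixed 0.25 threshold are illustrative assumptions.
+def async_infer_frames(detector: OpenVINOYOLODetector, frames: List[np.ndarray],
+                       jobs: int = 2, conf_threshold: float = 0.25) -> List[List[Dict]]:
+    """Run a list of frames through an AsyncInferQueue and postprocess each result."""
+    results: List[Optional[List[Dict]]] = [None] * len(frames)
+    infer_queue = ov.AsyncInferQueue(detector.compiled_model, jobs)
+
+    def on_done(request, frame_idx):
+        # Called when a request completes; reuse the detector's synchronous postprocess.
+        output = request.get_output_tensor(0).data
+        results[frame_idx] = detector.postprocess(output, frames[frame_idx].shape, conf_threshold)
+
+    infer_queue.set_callback(on_done)
+    for idx, frame in enumerate(frames):
+        infer_queue.start_async({0: detector.preprocess(frame)}, idx)
+    infer_queue.wait_all()
+    return results
+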
+import numpy as np
+import cv2
+
+def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, input_shape=(640, 640), original_shape=None):
+ """
+ output: OpenVINO raw output tensor (e.g., shape [1, 25200, 85])
+ conf_threshold: minimum confidence
+ iou_threshold: for NMS
+ input_shape: model input size (w, h)
+ original_shape: original image size (w, h)
+ """
+ # 1. Squeeze batch dimension
+ output = np.squeeze(output) # [25200, 85]
+
+ # 2. Split predictions
+ boxes = output[:, :4]
+ obj_conf = output[:, 4]
+ class_scores = output[:, 5:]
+
+ # 3. Get class with highest score
+ class_ids = np.argmax(class_scores, axis=1)
+ class_conf = class_scores[np.arange(len(class_scores)), class_ids]
+
+ # 4. Multiply objectness confidence with class confidence
+ scores = obj_conf * class_conf
+
+ # 5. Filter by confidence threshold
+ mask = scores > conf_threshold
+ boxes = boxes[mask]
+ scores = scores[mask]
+ class_ids = class_ids[mask]
+
+ if original_shape is not None:
+ # Rescale boxes from input_shape to original image shape
+ input_w, input_h = input_shape
+ orig_w, orig_h = original_shape
+ scale_x = orig_w / input_w
+ scale_y = orig_h / input_h
+
+ boxes[:, 0] *= scale_x # x1
+ boxes[:, 1] *= scale_y # y1
+ boxes[:, 2] *= scale_x # x2
+ boxes[:, 3] *= scale_y # y2
+
+ # 6. Convert boxes to [x, y, w, h] format for OpenCV NMS
+ boxes_xywh = []
+ for box in boxes:
+ x1, y1, x2, y2 = box
+ boxes_xywh.append([x1, y1, x2 - x1, y2 - y1])
+
+ # 7. Apply NMS and collect the filtered results
+ result_boxes = []
+ result_scores = []
+ result_classes = []
+ if len(boxes) > 0 and len(scores) > 0:
+ indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold)
+ if len(indices) > 0:
+ indices = np.array(indices).flatten()
+ for i in indices:
+ i = int(i)
+ result_boxes.append(boxes[i])
+ result_scores.append(scores[i])
+ result_classes.append(class_ids[i])
+ return result_boxes, result_scores, result_classes
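+
+# Small illustrative self-check for the helper above: one synthetic prediction row
+# (box, objectness, a single class score) should survive filtering and NMS. The
+# function name is hypothetical and it is not called anywhere in this module.
+def _demo_postprocess_openvino_yolo() -> None:
+    dummy = np.zeros((1, 25200, 85), dtype=np.float32)
+    dummy[0, 0, :4] = [100.0, 100.0, 200.0, 200.0]  # x1, y1, x2, y2
+    dummy[0, 0, 4] = 0.9   # objectness
+    dummy[0, 0, 7] = 0.8   # class score at index 2 ('car' in COCO)
+    boxes, scores, classes = postprocess_openvino_yolo(dummy, conf_threshold=0.4)
+    print(f"demo: {len(boxes)} box(es), classes={classes}, scores={scores}")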
+
+import os
+import time
+import numpy as np
+import cv2
+from pathlib import Path
+from typing import List, Dict, Optional
+
+# Only traffic-related classes for detection
+TRAFFIC_CLASS_NAMES = [
+ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck',
+ 'traffic light', 'stop sign', 'parking meter'
+]
+
+class OpenVINOVehicleDetector:
+ def __init__(self, model_path: str = None, device: str = "AUTO", use_quantized: bool = False, enable_ocr: bool = False, confidence_threshold: float = 0.4):
+ import openvino as ov
+ self.device = device
+ self.confidence_threshold = confidence_threshold
+ self.ocr_reader = None
+ self.class_names = TRAFFIC_CLASS_NAMES
+ self.performance_stats = {
+ 'fps': 0,
+ 'avg_inference_time': 0,
+ 'frames_processed': 0,
+ 'backend': f"OpenVINO-{device}",
+ 'total_detections': 0,
+ 'detection_rate': 0
+ }
+ self._inference_times = []
+ self._start_time = time.time()
+ self._frame_count = 0
+ # Model selection logic
+ self.model_path = self._find_best_model(model_path, use_quantized)
+ self.core = ov.Core()
+ self.model = self.core.read_model(self.model_path)
+ # Always reshape to static shape before accessing .shape
+ self.model.reshape({0: [1, 3, 640, 640]})
+ self.input_shape = self.model.inputs[0].shape
+ self.input_height = self.input_shape[2]
+ self.input_width = self.input_shape[3]
+ self.ov_config = {}
+ if device != "CPU":
+ # Already reshaped above, so nothing more needed here
+ pass
+ if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices):
+ self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"}
+ self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config)
+
+ self.output_layer = self.compiled_model.output(0)
+
+ def _find_best_model(self, model_path, use_quantized):
+ # Priority: quantized IR > IR > .pt
+ search_paths = [
+ Path(model_path) if model_path else None,
+ Path("yolo11x_openvino_int8_model/yolo11x.xml") if use_quantized else None,
+ Path("yolo11x_openvino_model/yolo11x.xml"),
+ Path("rcb/yolo11x_openvino_model/yolo11x.xml"),
+ Path("yolo11x.xml"),
+ Path("rcb/yolo11x.xml"),
+ Path("yolo11x.pt"),
+ Path("rcb/yolo11x.pt")
+ ]
+ for p in search_paths:
+ if p and p.exists():
+ return str(p)
+ raise FileNotFoundError("No suitable YOLOv11x model found for OpenVINO.")
+
+ def detect_vehicles(self, frame: np.ndarray, conf_threshold: float = None) -> List[Dict]:
+ if conf_threshold is None:
+ conf_threshold = 0.1 # Lowered for debugging
+ start = time.time()
+ input_tensor = self._preprocess(frame)
+ output = self.compiled_model([input_tensor])[self.output_layer]
+ # Debug: print raw output shape
+ # print(f"[DEBUG] Model output shape: {output.shape}")
+ detections = self._postprocess(output, frame.shape, conf_threshold)
+ # print(f"[DEBUG] Detections after postprocess: {len(detections)}")
+ elapsed = time.time() - start
+ self._inference_times.append(elapsed)
+ self._frame_count += 1
+ self.performance_stats['frames_processed'] = self._frame_count
+ self.performance_stats['total_detections'] += len(detections)
+ if len(self._inference_times) > 100:
+ self._inference_times.pop(0)
+ self.performance_stats['avg_inference_time'] = float(np.mean(self._inference_times)) if self._inference_times else 0
+ total_time = time.time() - self._start_time
+ self.performance_stats['fps'] = self._frame_count / total_time if total_time > 0 else 0
+ return detections
+
+ def _preprocess(self, frame: np.ndarray) -> np.ndarray:
+ img = cv2.resize(frame, (self.input_width, self.input_height))
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ img = img.astype(np.float32) / 255.0
+ img = img.transpose(2, 0, 1)[None]
+ return img
+
+ def _postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]:
+ # Output: (1, 84, 8400) or (84, 8400) or (8400, 84)
+ if output.ndim == 3:
+ output = np.squeeze(output)
+ if output.shape[0] == 84:
+ output = output.T # (8400, 84)
+ boxes = output[:, :4]
+ scores = output[:, 4:]
+ class_ids = np.argmax(scores, axis=1)
+ confidences = np.max(scores, axis=1)
+ detections = []
+ h, w = frame_shape[:2]
+        for box, score, class_id in zip(boxes, confidences, class_ids):
+ if score < conf_threshold:
+ continue
+ x_c, y_c, bw, bh = box
+ # If normalized, scale to input size
+ if all(0.0 <= v <= 1.0 for v in box):
+ x_c *= self.input_width
+ y_c *= self.input_height
+ bw *= self.input_width
+ bh *= self.input_height
+ # Scale to original frame size
+ scale_x = w / self.input_width
+ scale_y = h / self.input_height
+ x_c *= scale_x
+ y_c *= scale_y
+ bw *= scale_x
+ bh *= scale_y
+ x1 = int(round(x_c - bw / 2))
+ y1 = int(round(y_c - bh / 2))
+ x2 = int(round(x_c + bw / 2))
+ y2 = int(round(y_c + bh / 2))
+ x1 = max(0, min(x1, w - 1))
+ y1 = max(0, min(y1, h - 1))
+ x2 = max(0, min(x2, w - 1))
+ y2 = max(0, min(y2, h - 1))
+ if x2 <= x1 or y2 <= y1:
+ continue
+            # Keep only the traffic-related COCO classes; indexing the 9-name list with raw
+            # COCO ids would mislabel bus/truck/etc., so use the id-to-name mapping instead
+            class_name = TRAFFIC_CLASS_IDS.get(int(class_id))
+            if class_name is None:
+                continue  # Drop unknown/other classes
+ detections.append({
+ 'bbox': [x1, y1, x2, y2],
+ 'confidence': float(score),
+ 'class_id': int(class_id),
+ 'class_name': class_name
+ })
+ print(f"[DEBUG] Raw detections before NMS: {len(detections)}")
+        # Apply NMS (cv2.dnn.NMSBoxes expects [x, y, w, h] boxes, not [x1, y1, x2, y2])
+        if len(detections) > 0:
+            boxes_xywh = [[x1, y1, x2 - x1, y2 - y1] for x1, y1, x2, y2 in (det['bbox'] for det in detections)]
+            scores_list = [float(det['confidence']) for det in detections]
+            indices = cv2.dnn.NMSBoxes(boxes_xywh, scores_list, conf_threshold, 0.5)
+            indices = np.array(indices).flatten() if len(indices) > 0 else []
+            detections = [detections[int(i)] for i in indices]
+ print(f"[DEBUG] Detections after NMS: {len(detections)}")
+ return detections
+
+ def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray:
+        # Color palette for COCO classes (BGR); indexed modulo the palette length
+ COCO_COLORS = [
+ (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49),
+ (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187),
+ (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236),
+ (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199),
+ (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), (207, 255, 49),
+ (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187),
+ (44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236),
+ (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199),
+ (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49),
+ (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187),
+ (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236),
+ (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49),
+ (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187),
+ (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236),
+ (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49),
+ (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187),
+ (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236),
+ (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49)
+ ]
+ for det in detections:
+ x1, y1, x2, y2 = det['bbox']
+ label = f"{det['class_name']} {det['confidence']:.2f}"
+ color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)]
+ cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness)
+ cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
+ return frame
+
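+# Minimal usage sketch (an illustrative assumption, not invoked anywhere in this module):
+# build a detector, run it on one image, and save an annotated copy. The file names
+# "traffic.jpg" and "traffic_annotated.jpg" are placeholders.
+def example_detect_single_image(image_path: str = "traffic.jpg", device: str = "AUTO") -> None:
+    detector = OpenVINOVehicleDetector(device=device)  # picks the best available IR model
+    frame = cv2.imread(image_path)
+    if frame is None:
+        raise FileNotFoundError(f"Could not read image: {image_path}")
+    detections = detector.detect_vehicles(frame)
+    annotated = detector.draw(frame, detections)
+    cv2.imwrite("traffic_annotated.jpg", annotated)
+    print(f"{len(detections)} traffic-related objects detected; stats: {detector.performance_stats}")
+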
+# --- Video/Image/Live Inference ---
+def run_inference(detector: OpenVINOVehicleDetector, source=0, conf_threshold=0.25, flip=False, use_popup=False, video_width=None):
+ if isinstance(source, str) and not os.path.exists(source):
+ print(f"Downloading sample video: {source}")
+ import requests
+ url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4"
+ r = requests.get(url)
+ with open(source, 'wb') as f:
+ f.write(r.content)
+ cap = cv2.VideoCapture(source)
+ if not cap.isOpened():
+ print(f"Failed to open video source: {source}")
+ return
+ window_name = "YOLOv11x + OpenVINO Detection"
+ if use_popup:
+ cv2.namedWindow(window_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)
+ frame_count = 0
+ times = []
+ while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+ if flip:
+ frame = cv2.flip(frame, 1)
+ if video_width:
+ scale = video_width / max(frame.shape[:2])
+ frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
+ start = time.time()
+        detections = detector.detect_vehicles(frame, conf_threshold=conf_threshold)
+ frame = detector.draw(frame, detections)
+ elapsed = time.time() - start
+ times.append(elapsed)
+ if len(times) > 200:
+ times.pop(0)
+ fps = 1.0 / np.mean(times) if times else 0
+ cv2.putText(frame, f"FPS: {fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
+ if use_popup:
+ cv2.imshow(window_name, frame)
+ if cv2.waitKey(1) & 0xFF == 27:
+ break
+ else:
+ cv2.imshow(window_name, frame)
+ if cv2.waitKey(1) & 0xFF == 27:
+ break
+ frame_count += 1
+ cap.release()
+ cv2.destroyAllWindows()
+
+# --- Main Entrypoint ---
+if __name__ == "__main__":
+ # Choose model: yolo11x or yolo11n, etc.
+ MODEL_NAME = "yolo11x"
+
+ DEVICE = "AUTO" # or "CPU", "GPU"
+ # Step 1: Convert model if needed
+ ov_xml = convert_yolo_to_openvino(MODEL_NAME)
+ # Step 2: Quantize (optional, demo skips actual quantization)
+ ov_xml = quantize_openvino_model(ov_xml, MODEL_NAME)
+ # Step 3: Create detector
+    detector = OpenVINOVehicleDetector(model_path=str(ov_xml), device=DEVICE)
+ # Step 4: Run on webcam, video, or image
+ # Webcam: source=0, Video: source="video.mp4", Image: source="image.jpg"
+ run_inference(detector, source=0, conf_threshold=0.25, flip=True, use_popup=True, video_width=1280)
+# To run on a video file: run_inference(detector, source="people.mp4", conf_threshold=0.25)
+# To run on an image: run_inference(detector, source="image.jpg", conf_threshold=0.25)
+# To run async or batch, extend the OpenVINOVehicleDetector class with OpenVINO's async API as needed (see the sketch below).
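+
+# Async sketch (an illustrative assumption, not part of the original pipeline): one way to
+# extend OpenVINOVehicleDetector with OpenVINO's AsyncInferQueue so several frames are in
+# flight at once. Post-processing of the collected raw outputs would reuse _postprocess.
+def example_async_inference(detector: OpenVINOVehicleDetector, frames: list, jobs: int = 2) -> list:
+    import openvino as ov
+    results = [None] * len(frames)
+    queue = ov.AsyncInferQueue(detector.compiled_model, jobs)
+
+    def on_done(request, frame_index):
+        # Keep a copy of the raw output tensor for this frame
+        results[frame_index] = request.get_output_tensor(0).data.copy()
+
+    queue.set_callback(on_done)
+    for idx, frame in enumerate(frames):
+        queue.start_async({0: detector._preprocess(frame)}, userdata=idx)
+    queue.wait_all()
+    return results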
\ No newline at end of file
diff --git a/qt_app_pyside1/__pycache__/red_light_violation_pipeline.cpython-311.pyc b/qt_app_pyside1/__pycache__/red_light_violation_pipeline.cpython-311.pyc
index 26a997f..2874dc0 100644
Binary files a/qt_app_pyside1/__pycache__/red_light_violation_pipeline.cpython-311.pyc and b/qt_app_pyside1/__pycache__/red_light_violation_pipeline.cpython-311.pyc differ
diff --git a/qt_app_pyside1/__pycache__/splash.cpython-311.pyc b/qt_app_pyside1/__pycache__/splash.cpython-311.pyc
index b8b402f..8bb8e69 100644
Binary files a/qt_app_pyside1/__pycache__/splash.cpython-311.pyc and b/qt_app_pyside1/__pycache__/splash.cpython-311.pyc differ
diff --git a/qt_app_pyside1/config.json b/qt_app_pyside1/config.json
index c065732..a482cae 100644
--- a/qt_app_pyside1/config.json
+++ b/qt_app_pyside1/config.json
@@ -4,7 +4,8 @@
"enable_ocr": true,
"enable_tracking": true,
"model_path": null,
- "device": "CPU"
+ "model": "YOLOv11n",
+ "device": "GPU"
},
"violations": {
"red_light_grace_period": 2.0,
@@ -29,5 +30,11 @@
"analytics": {
"enable_charts": true,
"history_length": 1000
+ },
+ "vlm": {
+ "backend_url": "http://localhost:8399",
+ "enable_search": true,
+ "enable_vqa": true,
+ "timeout": 30
}
}
\ No newline at end of file
diff --git a/qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc b/qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc
index 9af8648..a445608 100644
Binary files a/qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc and b/qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc differ
diff --git a/qt_app_pyside1/controllers/__pycache__/analytics_controller.cpython-311.pyc b/qt_app_pyside1/controllers/__pycache__/analytics_controller.cpython-311.pyc
index 4275822..f8ffc38 100644
Binary files a/qt_app_pyside1/controllers/__pycache__/analytics_controller.cpython-311.pyc and b/qt_app_pyside1/controllers/__pycache__/analytics_controller.cpython-311.pyc differ
diff --git a/qt_app_pyside1/controllers/__pycache__/bytetrack_tracker.cpython-311.pyc b/qt_app_pyside1/controllers/__pycache__/bytetrack_tracker.cpython-311.pyc
index ec40a24..ff24eef 100644
Binary files a/qt_app_pyside1/controllers/__pycache__/bytetrack_tracker.cpython-311.pyc and b/qt_app_pyside1/controllers/__pycache__/bytetrack_tracker.cpython-311.pyc differ
diff --git a/qt_app_pyside1/controllers/__pycache__/model_manager.cpython-311.pyc b/qt_app_pyside1/controllers/__pycache__/model_manager.cpython-311.pyc
index 49de43f..53fe91a 100644
Binary files a/qt_app_pyside1/controllers/__pycache__/model_manager.cpython-311.pyc and b/qt_app_pyside1/controllers/__pycache__/model_manager.cpython-311.pyc differ
diff --git a/qt_app_pyside1/controllers/__pycache__/performance_overlay.cpython-311.pyc b/qt_app_pyside1/controllers/__pycache__/performance_overlay.cpython-311.pyc
index d2a4744..c077357 100644
Binary files a/qt_app_pyside1/controllers/__pycache__/performance_overlay.cpython-311.pyc and b/qt_app_pyside1/controllers/__pycache__/performance_overlay.cpython-311.pyc differ
diff --git a/qt_app_pyside1/controllers/__pycache__/red_light_violation_detector.cpython-311.pyc b/qt_app_pyside1/controllers/__pycache__/red_light_violation_detector.cpython-311.pyc
index 26ac61e..fa890b8 100644
Binary files a/qt_app_pyside1/controllers/__pycache__/red_light_violation_detector.cpython-311.pyc and b/qt_app_pyside1/controllers/__pycache__/red_light_violation_detector.cpython-311.pyc differ
diff --git a/qt_app_pyside1/controllers/__pycache__/video_controller_new.cpython-311.pyc b/qt_app_pyside1/controllers/__pycache__/video_controller_new.cpython-311.pyc
index 96751d1..b0dcd15 100644
Binary files a/qt_app_pyside1/controllers/__pycache__/video_controller_new.cpython-311.pyc and b/qt_app_pyside1/controllers/__pycache__/video_controller_new.cpython-311.pyc differ
diff --git a/qt_app_pyside1/controllers/model_manager.py b/qt_app_pyside1/controllers/model_manager.py
index b78809a..d586a1e 100644
--- a/qt_app_pyside1/controllers/model_manager.py
+++ b/qt_app_pyside1/controllers/model_manager.py
@@ -52,7 +52,8 @@ class ModelManager:
"confidence_threshold": 0.3,
"enable_ocr": True,
"enable_tracking": True,
- "model_path": None
+ "model_path": None,
+ "device": "GPU" # Force GPU usage for Intel Arc
},
"violations": {
"red_light_grace_period": 2.0,
@@ -97,13 +98,27 @@ class ModelManager:
# Initialize detector
print(f"✅ Initializing OpenVINO detector with model: {model_path}")
+
+ # Store current model info for stats
+ self.current_model_path = model_path
+ self.current_model_name = self._extract_model_name_from_path(model_path)
+
device = self.config["detection"].get("device", "AUTO")
- print(f"✅ Using inference device: {device}")
+ print(f"🔧 Model Manager: Config device setting: {device}")
+ print(f"🔧 Model Manager: Creating detector with device: {device}")
self.detector = OpenVINOVehicleDetector(
model_path=model_path,
device=device,
confidence_threshold=self.config["detection"]["confidence_threshold"]
)
+ print(f"✅ Detector created with device: {device}")
+
+ # Verify the detector is using the correct device
+ if hasattr(self.detector, 'device'):
+ actual_device = self.detector.device
+ print(f"🔍 Model Manager: Detector reports device as: {actual_device}")
+ else:
+ print(f"🔍 Model Manager: Detector device attribute not available")
# Use only RedLightViolationPipeline for violation/crosswalk/traffic light logic
self.violation_pipeline = RedLightViolationPipeline(debug=True)
@@ -128,18 +143,48 @@ class ModelManager:
traceback.print_exc()
def _find_best_model_path(self, base_model_name: str = None) -> Optional[str]:
-
+ """
+ Find the best model path based on configuration.
+ Now respects the model selection from config panel.
+ """
if base_model_name is None:
- device = self.config["detection"].get("device", "AUTO")
- if device == "CPU" or device == "AUTO":
- # Use yolo11n for CPU - faster, lighter model
- base_model_name = "yolo11n"
- print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)")
+ # First, check if a specific model is selected in config
+ selected_model = self.config["detection"].get("model", None)
+ if selected_model and selected_model.lower() != "auto":
+ base_model_name = selected_model.lower()
+ # Convert YOLOv11x format to yolo11x format
+ if 'yolov11' in base_model_name:
+ base_model_name = base_model_name.replace('yolov11', 'yolo11')
+ print(f"🎯 Using model selected from config panel: {base_model_name}")
else:
- # Use yolo11x for GPU - larger model with better accuracy
- base_model_name = "yolo11x"
- print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)")
+ # Fallback to device-based selection only if no specific model selected
+ device = self.config["detection"].get("device", "AUTO")
+ if device == "CPU" or device == "AUTO":
+ # Use yolo11n for CPU - faster, lighter model
+ base_model_name = "yolo11n"
+ print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)")
+ else:
+ # Use yolo11x for GPU - larger model with better accuracy
+ base_model_name = "yolo11x"
+ print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)")
+
+ # Ensure we have a clean model name (remove any version suffixes)
+ if base_model_name:
+ # Handle different model name formats
+ if "yolo11" in base_model_name.lower():
+ if "11n" in base_model_name.lower():
+ base_model_name = "yolo11n"
+ elif "11x" in base_model_name.lower():
+ base_model_name = "yolo11x"
+ elif "11s" in base_model_name.lower():
+ base_model_name = "yolo11s"
+ elif "11m" in base_model_name.lower():
+ base_model_name = "yolo11m"
+ elif "11l" in base_model_name.lower():
+ base_model_name = "yolo11l"
+
+ print(f"🔍 Looking for model: {base_model_name}")
# Check if the openvino_models directory exists in the current working directory
cwd_openvino_dir = Path.cwd() / "openvino_models"
@@ -201,6 +246,55 @@ class ModelManager:
print(f"❌ No model found for {base_model_name}")
return None
+
+    def _extract_model_name_from_path(self, model_path: str) -> str:
+        """Extract a display-friendly model name from a file path"""
+        try:
+            path_lower = model_path.lower()
+            print(f"🔍 Extracting model name from path: {model_path}")
+            # Check the most specific variants first, then fall back to the family name
+            for key, name in (
+                ('yolo11n', 'YOLOv11n'), ('yolo11s', 'YOLOv11s'), ('yolo11m', 'YOLOv11m'),
+                ('yolo11l', 'YOLOv11l'), ('yolo11x', 'YOLOv11x'), ('yolo11', 'YOLOv11'),
+            ):
+                if key in path_lower:
+                    print(f"✅ Extracted model name: {name}")
+                    return name
+            print("⚠️ Fallback model name: YOLO")
+            return 'YOLO'
+        except Exception as e:
+            print(f"⚠️ Error extracting model name: {e}")
+            return 'Unknown'
+
+ def get_current_model_info(self) -> dict:
+ """Get current model information for stats"""
+ return {
+ 'model_path': getattr(self, 'current_model_path', None),
+ 'model_name': getattr(self, 'current_model_name', 'Unknown'),
+            'device': getattr(self.detector, 'device', 'Unknown') if self.detector else 'Unknown'
+ }
def detect(self, frame: np.ndarray) -> List[Dict]:
"""
@@ -392,8 +486,9 @@ class ModelManager:
if not new_config:
return
- # Store old device setting to check if it changed
+ # Store old settings to check if they changed
old_device = self.config["detection"].get("device", "AUTO") if "detection" in self.config else "AUTO"
+ old_model = self.config["detection"].get("model", "auto") if "detection" in self.config else "auto"
# Update configuration
for section in new_config:
@@ -402,21 +497,46 @@ class ModelManager:
else:
self.config[section] = new_config[section]
- # Check if device changed - if so, we need to reinitialize models
+ # Check if device or model changed - if so, we need to reinitialize models
new_device = self.config["detection"].get("device", "AUTO")
+ new_model = self.config["detection"].get("model", "auto")
device_changed = old_device != new_device
+ model_changed = old_model != new_model
- if device_changed:
- print(f"📢 Device changed from {old_device} to {new_device}, reinitializing models...")
- # Reinitialize models with new device
- self._initialize_models()
+ if device_changed or model_changed:
+ print(f"📢 Configuration changed:")
+ if device_changed:
+ print(f" Device: {old_device} → {new_device}")
+ if model_changed:
+ print(f" Model: {old_model} → {new_model}")
+ print(f" Reinitializing models...")
+
+ # Force complete reinitialization - let the model path extraction handle the naming
+ self.force_model_reload()
return
- # Just update detector confidence threshold if device didn't change
+ # Just update detector confidence threshold if device and model didn't change
if self.detector:
conf_thres = self.config["detection"].get("confidence_threshold", 0.5)
self.detector.conf_thres = conf_thres
+ def force_model_reload(self):
+ """Force complete model reload with current config"""
+ print("🔄 Force reloading models with current configuration...")
+
+ # Get the configured model selection
+ selected_model = self.config["detection"].get("model", "auto")
+ print(f"🎯 Force reload: Config model selection = {selected_model}")
+
+ # Clear current models
+ self.detector = None
+ self.violation_pipeline = None
+
+ # Reinitialize with current config - let _initialize_models handle the naming
+ self._initialize_models()
+
+ print("✅ Models reloaded successfully")
+
def _bbox_iou(self, boxA, boxB):
# Compute the intersection over union of two boxes
xA = max(boxA[0], boxB[0])
diff --git a/qt_app_pyside1/controllers/video_controller.py b/qt_app_pyside1/controllers/video_controller.py
index b6de05f..89dbfe3 100644
--- a/qt_app_pyside1/controllers/video_controller.py
+++ b/qt_app_pyside1/controllers/video_controller.py
@@ -73,6 +73,15 @@ class VideoController(QObject):
# Debug counter for monitoring frame processing
self.debug_counter = 0
+
+ def on_model_switched(self, device):
+ """Handle device switch notification from model manager."""
+ print(f"[VIDEO CONTROLLER] Device switched to: {device}")
+ # Update model manager config if needed
+ if self.model_manager and hasattr(self.model_manager, 'config'):
+ self.model_manager.config["detection"]["device"] = device
+ print(f"[VIDEO CONTROLLER] Updated model manager device to: {device}")
+
def set_source(self, source):
"""Set video source (file path, camera index, or URL)"""
print(f"DEBUG: VideoController.set_source called with: {source} (type: {type(source)})")
diff --git a/qt_app_pyside1/controllers/video_controller_new.py b/qt_app_pyside1/controllers/video_controller_new.py
index 794059b..2e762f5 100644
--- a/qt_app_pyside1/controllers/video_controller_new.py
+++ b/qt_app_pyside1/controllers/video_controller_new.py
@@ -1,4 +1,1958 @@
-from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer
+# from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer, Slot
+# from PySide6.QtGui import QImage, QPixmap
+# import cv2
+# import time
+# import numpy as np
+# from datetime import datetime
+# from collections import deque
+# from typing import Dict, List, Optional
+# import os
+# import sys
+# import math
+
+# # Add parent directory to path for imports
+# sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+# # Import utilities
+# from utils.annotation_utils import (
+# draw_detections,
+# draw_performance_metrics,
+# resize_frame_for_display,
+# convert_cv_to_qimage,
+# convert_cv_to_pixmap,
+# pipeline_with_violation_line
+# )
+
+# # Import enhanced annotation utilities
+# from utils.enhanced_annotation_utils import (
+# enhanced_draw_detections,
+# draw_performance_overlay,
+# enhanced_cv_to_qimage,
+# enhanced_cv_to_pixmap
+# )
+
+# # Import traffic light color detection utilities
+# from red_light_violation_pipeline import RedLightViolationPipeline
+# from utils.traffic_light_utils import detect_traffic_light_color, draw_traffic_light_status, ensure_traffic_light_color
+# from utils.crosswalk_utils2 import detect_crosswalk_and_violation_line, draw_violation_line, get_violation_line_y
+# from controllers.bytetrack_tracker import ByteTrackVehicleTracker
+# TRAFFIC_LIGHT_CLASSES = ["traffic light", "trafficlight", "tl"]
+# TRAFFIC_LIGHT_NAMES = ['trafficlight', 'traffic light', 'tl', 'signal']
+
+# def normalize_class_name(class_name):
+# """Normalizes class names from different models/formats to a standard name"""
+# if not class_name:
+# return ""
+
+# name_lower = class_name.lower()
+
+# # Traffic light variants
+# if name_lower in ['traffic light', 'trafficlight', 'traffic_light', 'tl', 'signal']:
+# return 'traffic light'
+
+# # Keep specific vehicle classes (car, truck, bus) separate
+# # Just normalize naming variations within each class
+# if name_lower in ['car', 'auto', 'automobile']:
+# return 'car'
+# elif name_lower in ['truck']:
+# return 'truck'
+# elif name_lower in ['bus']:
+# return 'bus'
+# elif name_lower in ['motorcycle', 'scooter', 'motorbike', 'bike']:
+# return 'motorcycle'
+
+# # Person variants
+# if name_lower in ['person', 'pedestrian', 'human']:
+# return 'person'
+
+# # Other common classes can be added here
+
+# return class_name
+
+# def is_traffic_light(class_name):
+# """Helper function to check if a class name is a traffic light with normalization"""
+# if not class_name:
+# return False
+# normalized = normalize_class_name(class_name)
+# return normalized == 'traffic light'
+
+# class VideoController(QObject):
+# frame_ready = Signal(object, object, dict) # QPixmap, detections, metrics
+# raw_frame_ready = Signal(np.ndarray, list, float) # frame, detections, fps
+# frame_np_ready = Signal(np.ndarray) # Direct NumPy frame signal for display
+# stats_ready = Signal(dict) # Dictionary with stats (fps, detection_time, traffic_light)
+# violation_detected = Signal(dict) # Signal emitted when a violation is detected
+# progress_ready = Signal(int, int, float) # value, max_value, timestamp
+# device_info_ready = Signal(dict) # Signal to emit device info to the UI
+# auto_select_model_device = Signal() # Signal for UI to request auto model/device selection
+# performance_stats_ready = Signal(dict) # NEW: Signal for performance tab (fps, inference, device, res)
+# violations_batch_ready = Signal(list) # NEW: Signal to emit a batch of violations
+# pause_state_changed = Signal(bool) # Signal emitted when pause state changes (True=paused, False=playing)
+
+# def __init__(self, model_manager=None):
+# """
+# Initialize video controller.
+
+# Args:
+# model_manager: Model manager instance for detection and violation
+# """
+# super().__init__()
+# print("Loaded advanced VideoController from video_controller_new.py") # DEBUG: Confirm correct controller
+
+# self._running = False
+# self._paused = False # Add pause state
+# self._last_frame = None # Store last frame for VLM analysis during pause
+# self._last_analysis_data = {} # Store last analysis data for VLM
+# self.source = None
+# self.source_type = None
+# self.source_fps = 0
+# self.performance_metrics = {}
+# self.mutex = QMutex()
+# self.pause_condition = QWaitCondition() # Add wait condition for pause
+
+# # Performance tracking
+# self.processing_times = deque(maxlen=100) # Store last 100 processing times
+# self.fps_history = deque(maxlen=100) # Store last 100 FPS values
+# self.start_time = time.time()
+# self.frame_count = 0
+# self.actual_fps = 0.0
+
+# self.model_manager = model_manager
+# self.inference_model = None
+# self.tracker = None
+
+# # Initialize device tracking
+# if self.model_manager and hasattr(self.model_manager, 'config'):
+# self.current_device = self.model_manager.config.get("detection", {}).get("device", "CPU")
+# else:
+# self.current_device = "CPU"
+# print(f"🔧 Video Controller: Initialized with device: {self.current_device}")
+
+# self.current_frame = None
+# self.current_detections = []
+
+# # Traffic light state tracking
+# self.latest_traffic_light = {"color": "unknown", "confidence": 0.0}
+
+# # Vehicle tracking settings
+# self.vehicle_history = {} # Dictionary to store vehicle position history
+# self.vehicle_statuses = {} # Track stable movement status
+# self.movement_threshold = 1.5 # ADJUSTED: More balanced movement detection (was 0.8)
+# self.min_confidence_threshold = 0.3 # FIXED: Lower threshold for better detection (was 0.5)
+
+# # Enhanced violation detection settings
+# self.position_history_size = 20 # Increased from 10 to track longer history
+# self.crossing_check_window = 8 # Check for crossings over the last 8 frames instead of just 2
+# self.max_position_jump = 50 # Maximum allowed position jump between frames (detect ID switches)
+
+# # Set up violation detection
+# try:
+# from controllers.red_light_violation_detector import RedLightViolationDetector
+# self.violation_detector = RedLightViolationDetector()
+# print("✅ Red light violation detector initialized")
+# except Exception as e:
+# self.violation_detector = None
+# print(f"❌ Could not initialize violation detector: {e}")
+
+# # Import crosswalk detection
+# try:
+# self.detect_crosswalk_and_violation_line = detect_crosswalk_and_violation_line
+# # self.draw_violation_line = draw_violation_line
+# print("✅ Crosswalk detection utilities imported")
+# except Exception as e:
+# print(f"❌ Could not import crosswalk detection: {e}")
+# self.detect_crosswalk_and_violation_line = lambda frame, *args: (None, None, {})
+# # self.draw_violation_line = lambda frame, *args, **kwargs: frame
+
+# # Configure thread
+# self.thread = QThread()
+# self.moveToThread(self.thread)
+# self.thread.started.connect(self._run)
+# # Performance measurement
+# self.mutex = QMutex()
+# self.condition = QWaitCondition()
+# self.performance_metrics = {
+# 'FPS': 0.0,
+# 'Detection (ms)': 0.0,
+# 'Total (ms)': 0.0
+# }
+
+# # Setup render timer with more aggressive settings for UI updates
+# self.render_timer = QTimer()
+# self.render_timer.timeout.connect(self._process_frame)
+
+# # Frame buffer
+# self.current_frame = None
+# self.current_detections = []
+# self.current_violations = []
+
+# # Debug counter for monitoring frame processing
+# self.debug_counter = 0
+# self.violation_frame_counter = 0 # Add counter for violation processing
+
+# # Initialize the traffic light color detection pipeline
+# self.cv_violation_pipeline = RedLightViolationPipeline(debug=True)
+
+# # Initialize vehicle tracker
+# self.vehicle_tracker = ByteTrackVehicleTracker()
+
+# # Add red light violation system
+# # self.red_light_violation_system = RedLightViolationSystem()
+
+# # Initialize scene analytics adapter
+# try:
+# from utils.scene_analytics import SceneAnalyticsAdapter
+# self.scene_analytics = SceneAnalyticsAdapter(camera_id="desktop_main")
+# self.scene_analytics.object_detected.connect(self._on_scene_object_detected)
+# self.scene_analytics.scene_analytics_updated.connect(self._on_scene_analytics_updated)
+# self.scene_analytics.roi_event_detected.connect(self._on_roi_event_detected)
+# print("✅ Scene analytics adapter initialized")
+# except Exception as e:
+# self.scene_analytics = None
+# print(f"❌ Could not initialize scene analytics: {e}")
+
+# def refresh_model_info(self):
+# """Force refresh of model information for performance display"""
+# if hasattr(self, 'model_manager') and self.model_manager:
+# print("🔄 Refreshing model information in video controller")
+# # The model info will be refreshed in the next stats update
+# # Force current device update from config
+# if hasattr(self.model_manager, 'config') and 'detection' in self.model_manager.config:
+# self.current_device = self.model_manager.config['detection'].get('device', 'CPU')
+# print(f"🔄 Updated current device to: {self.current_device}")
+
+# def set_source(self, source):
+# """
+# Set video source (file path, camera index, or URL)
+
+# Args:
+# source: Video source - can be a camera index (int), file path (str),
+# or URL (str). If None, defaults to camera 0.
+
+# Returns:
+# bool: True if source was set successfully, False otherwise
+# """
+# print(f"🎬 VideoController.set_source called with: {source} (type: {type(source)})")
+
+# # Store current state
+# was_running = self._running
+
+# # Stop current processing if running
+# if self._running:
+# print("⏹️ Stopping current video processing")
+# self.stop()
+
+# try:
+# # Handle source based on type with better error messages
+# if source is None:
+# print("⚠️ Received None source, defaulting to camera 0")
+# self.source = 0
+# self.source_type = "camera"
+
+# elif isinstance(source, str) and source.strip():
+# if os.path.exists(source):
+# # Valid file path
+# self.source = source
+# self.source_type = "file"
+# print(f"📄 Source set to file: {self.source}")
+# elif source.lower().startswith(("http://", "https://", "rtsp://", "rtmp://")):
+# # URL stream
+# self.source = source
+# self.source_type = "url"
+# print(f"🌐 Source set to URL stream: {self.source}")
+# elif source.isdigit():
+# # String camera index (convert to int)
+# self.source = int(source)
+# self.source_type = "camera"
+# print(f"📹 Source set to camera index: {self.source}")
+# else:
+# # Try as device path or special string
+# self.source = source
+# self.source_type = "device"
+# print(f"📱 Source set to device path: {self.source}")
+
+# elif isinstance(source, int):
+# # Camera index
+# self.source = source
+# self.source_type = "camera"
+# print(f"📹 Source set to camera index: {self.source}")
+
+# else:
+# # Unrecognized - default to camera 0 with warning
+# print(f"⚠️ Unrecognized source type: {type(source)}, defaulting to camera 0")
+# self.source = 0
+# self.source_type = "camera"
+# except Exception as e:
+# print(f"❌ Error setting source: {e}")
+# self.source = 0
+# self.source_type = "camera"
+# return False
+
+# # Get properties of the source (fps, dimensions, etc)
+# print(f"🔍 Getting properties for source: {self.source}")
+# success = self._get_source_properties()
+
+# if success:
+# print(f"✅ Successfully configured source: {self.source} ({self.source_type})")
+
+# # Reset ByteTrack tracker for new source to ensure IDs start from 1
+# if hasattr(self, 'vehicle_tracker') and self.vehicle_tracker is not None:
+# try:
+# print("🔄 Resetting vehicle tracker for new source")
+# self.vehicle_tracker.reset()
+# except Exception as e:
+# print(f"⚠️ Could not reset vehicle tracker: {e}")
+
+# # Emit successful source change
+# self.stats_ready.emit({
+# 'source_changed': True,
+# 'source_type': self.source_type,
+# 'fps': self.source_fps if hasattr(self, 'source_fps') else 0,
+# 'dimensions': f"{self.frame_width}x{self.frame_height}" if hasattr(self, 'frame_width') else "unknown"
+# })
+
+# # Restart if previously running
+# if was_running:
+# print("▶️ Restarting video processing with new source")
+# self.start()
+# else:
+# print(f"❌ Failed to configure source: {self.source}")
+# # Notify UI about the error
+# self.stats_ready.emit({
+# 'source_changed': False,
+# 'error': f"Invalid video source: {self.source}",
+# 'source_type': self.source_type,
+# 'fps': 0,
+# 'detection_time_ms': "0",
+# 'traffic_light_color': {"color": "unknown", "confidence": 0.0}
+# })
+
+# return False
+
+# # Return success status
+# return success
+
+# def _get_source_properties(self):
+
+# try:
+# print(f"🔍 Opening video source for properties check: {self.source}")
+# cap = cv2.VideoCapture(self.source)
+
+
+# if not cap.isOpened():
+# print(f"❌ Failed to open video source: {self.source}")
+# return False
+
+
+# self.source_fps = cap.get(cv2.CAP_PROP_FPS)
+
+
+# self.frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+# self.frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+# self.frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+
+# ret, test_frame = cap.read()
+# if not ret or test_frame is None:
+# print("⚠️ Could not read test frame from source")
+
+# if self.source_type == "camera":
+# print("🔄 Retrying camera initialization...")
+# time.sleep(1.0)
+# ret, test_frame = cap.read()
+# if not ret or test_frame is None:
+# print("❌ Camera initialization failed after retry")
+# cap.release()
+# return False
+# else:
+# print("❌ Could not read frames from video source")
+# cap.release()
+# return False
+
+# # Release the capture
+# cap.release()
+
+# print(f"✅ Video source properties: {self.frame_width}x{self.frame_height}, {self.source_fps} FPS")
+# return True
+
+#         except Exception as e:
+#             print(f"❌ Error getting source properties: {e}")
+#             return False
+
+# def start(self):
+# """Start video processing"""
+# if not self._running:
+# self._running = True
+# self.start_time = time.time()
+# self.frame_count = 0
+# self.debug_counter = 0
+# print("DEBUG: Starting video processing thread")
+
+# # Reset notification flags for new session
+# if hasattr(self, '_no_traffic_light_notified'):
+# delattr(self, '_no_traffic_light_notified')
+
+# # Reset ByteTrack tracker to ensure IDs start from 1
+# if hasattr(self, 'vehicle_tracker') and self.vehicle_tracker is not None:
+# try:
+# print("🔄 Resetting vehicle tracker for new session")
+# self.vehicle_tracker.reset()
+# except Exception as e:
+# print(f"⚠️ Could not reset vehicle tracker: {e}")
+
+# # Start the processing thread - add more detailed debugging
+# if not self.thread.isRunning():
+# print("🚀 Thread not running, starting now...")
+# try:
+# self.thread.start()
+# print("✅ Thread started successfully")
+# print(f"🔄 Thread state: running={self.thread.isRunning()}, finished={self.thread.isFinished()}")
+# except Exception as e:
+# print(f"❌ Failed to start thread: {e}")
+# import traceback
+# traceback.print_exc()
+# else:
+# print("⚠️ Thread is already running!")
+# print(f"🔄 Thread state: running={self.thread.isRunning()}, finished={self.thread.isFinished()}")
+
+# # Start the render timer with a very aggressive interval (10ms = 100fps)
+# # This ensures we can process frames as quickly as possible
+# print("⏱️ Starting render timer...")
+# self.render_timer.start(10)
+# print("✅ Render timer started at 100Hz")
+
+# def stop(self):
+# """Stop video processing"""
+# if self._running:
+# print("DEBUG: Stopping video processing")
+# self._running = False
+
+# # If paused, wake up the thread so it can exit
+# self.mutex.lock()
+# self._paused = False
+# self.pause_condition.wakeAll()
+# self.mutex.unlock()
+
+# self.render_timer.stop()
+# # Properly terminate the thread
+# if self.thread.isRunning():
+# self.thread.quit()
+# if not self.thread.wait(3000): # Wait 3 seconds max
+# self.thread.terminate()
+# print("WARNING: Thread termination forced")
+# # Clear the current frame
+# self.mutex.lock()
+# self.current_frame = None
+# self.mutex.unlock()
+# print("DEBUG: Video processing stopped")
+
+# def __del__(self):
+# print("[VideoController] __del__ called. Cleaning up thread and timer.")
+# self.stop()
+# if self.thread.isRunning():
+# self.thread.quit()
+# self.thread.wait(1000)
+# self.render_timer.stop()
+
+# def capture_snapshot(self) -> np.ndarray:
+# """Capture current frame"""
+# if self.current_frame is not None:
+# return self.current_frame.copy()
+# return None
+
+# def _run(self):
+# """Main processing loop (runs in thread)"""
+# try:
+# # Print the source we're trying to open
+# print(f"DEBUG: Opening video source: {self.source} (type: {type(self.source)})")
+
+# cap = None # Initialize capture variable
+
+# # Try to open source with more robust error handling
+# max_retries = 3
+# retry_delay = 1.0 # seconds
+
+# # Function to attempt opening the source with multiple retries
+# def try_open_source(src, retries=max_retries, delay=retry_delay):
+# for attempt in range(1, retries + 1):
+# print(f"🎥 Opening source (attempt {attempt}/{retries}): {src}")
+# try:
+# capture = cv2.VideoCapture(src)
+# if capture.isOpened():
+# # Try to read a test frame to confirm it's working
+# ret, test_frame = capture.read()
+# if ret and test_frame is not None:
+# print(f"✅ Source opened successfully: {src}")
+# # Reset capture position for file sources
+# if isinstance(src, str) and os.path.exists(src):
+# capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
+# return capture
+# else:
+# print(f"⚠️ Source opened but couldn't read frame: {src}")
+# capture.release()
+# else:
+# print(f"⚠️ Failed to open source: {src}")
+
+# # Retry after delay
+# if attempt < retries:
+# print(f"Retrying in {delay:.1f} seconds...")
+# time.sleep(delay)
+# except Exception as e:
+# print(f"❌ Error opening source {src}: {e}")
+# if attempt < retries:
+# print(f"Retrying in {delay:.1f} seconds...")
+# time.sleep(delay)
+
+# print(f"❌ Failed to open source after {retries} attempts: {src}")
+# return None
+
+# # Handle different source types
+# if isinstance(self.source, str) and os.path.exists(self.source):
+# # It's a valid file path
+# print(f"📄 Opening video file: {self.source}")
+# cap = try_open_source(self.source)
+
+# elif isinstance(self.source, int) or (isinstance(self.source, str) and self.source.isdigit()):
+# # It's a camera index
+# camera_idx = int(self.source) if isinstance(self.source, str) else self.source
+# print(f"📹 Opening camera with index: {camera_idx}")
+
+# # For cameras, try with different backend options if it fails
+# cap = try_open_source(camera_idx)
+
+# # If failed, try with DirectShow backend on Windows
+# if cap is None and os.name == 'nt':
+# print("🔄 Trying camera with DirectShow backend...")
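+#                     # OpenCV accepts a backend choice either by adding the backend
+#                     # constant to the device index (as below) or, with the two-argument
+#                     # constructor, as the apiPreference: cv2.VideoCapture(camera_idx, cv2.CAP_DSHOW)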
+# cap = try_open_source(camera_idx + cv2.CAP_DSHOW)
+
+# else:
+# # Try as a string source (URL or device path)
+# print(f"🌐 Opening source as string: {self.source}")
+# cap = try_open_source(str(self.source))
+
+# # Check if we successfully opened the source
+# if cap is None:
+# print(f"❌ Failed to open video source after all attempts: {self.source}")
+# # Notify UI about the error
+# self.stats_ready.emit({
+# 'error': f"Could not open video source: {self.source}",
+# 'fps': "0",
+# 'detection_time_ms': "0",
+# 'traffic_light_color': {"color": "unknown", "confidence": 0.0}
+# })
+# return
+
+# # Check again to ensure capture is valid
+# if not cap or not cap.isOpened():
+# print(f"ERROR: Could not open video source {self.source}")
+# # Emit a signal to notify UI about the error
+# self.stats_ready.emit({
+# 'error': f"Failed to open video source: {self.source}",
+# 'fps': "0",
+# 'detection_time_ms': "0",
+# 'traffic_light_color': {"color": "unknown", "confidence": 0.0}
+# })
+# return
+
+# # Configure frame timing based on source FPS
+# frame_time = 1.0 / self.source_fps if self.source_fps > 0 else 0.033
+# prev_time = time.time()
+
+# # Log successful opening
+# print(f"SUCCESS: Video source opened: {self.source}")
+# print(f"Source info - FPS: {self.source_fps}, Size: {self.frame_width}x{self.frame_height}")
+# # Main processing loop
+# frame_error_count = 0
+# max_consecutive_errors = 10
+
+# # --- Violation Rule Functions ---
+# def point_in_polygon(point, polygon):
+# # Simple point-in-rect for now; replace with polygon logic if needed
+# x, y = point
+# x1, y1, w, h = polygon
+# return x1 <= x <= x1 + w and y1 <= y <= y1 + h
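+#             # Sketch only: the rect-based helper above is what the code actually uses.
+#             # If a true polygon ROI were ever needed, a standard ray-casting test could
+#             # replace it; 'point_in_true_polygon' and its list-of-(x, y) vertices argument
+#             # are assumptions for illustration, not part of the existing pipeline.
+#             def point_in_true_polygon(point, vertices):
+#                 px, py = point
+#                 inside = False
+#                 n = len(vertices)
+#                 for i in range(n):
+#                     x1, y1 = vertices[i]
+#                     x2, y2 = vertices[(i + 1) % n]
+#                     # Toggle on every polygon edge crossed by a horizontal ray from the point
+#                     if (y1 > py) != (y2 > py):
+#                         if px < x1 + (py - y1) * (x2 - x1) / (y2 - y1):
+#                             inside = not inside
+#                 return inside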
+
+# def calculate_speed(track, history_dict):
+# # Use last two positions for speed
+# hist = history_dict.get(track['id'], [])
+# if len(hist) < 2:
+# return 0.0
+# (x1, y1), t1 = hist[-2]
+# (x2, y2), t2 = hist[-1]
+# dist = ((x2-x1)**2 + (y2-y1)**2)**0.5
+# dt = max(t2-t1, 1e-3)
+# return dist / dt
+
+# def check_vehicle_pedestrian_conflict(vehicle_track, pedestrian_tracks, crosswalk_poly, light_state):
+# if light_state != 'green':
+# return False
+# if not point_in_polygon(vehicle_track['center'], crosswalk_poly):
+# return False
+# for ped in pedestrian_tracks:
+# if point_in_polygon(ped['center'], crosswalk_poly):
+# return True
+# return False
+
+# def check_stop_on_crosswalk(vehicle_track, crosswalk_poly, light_state, history_dict):
+# if light_state != 'red':
+# return False
+# is_inside = point_in_polygon(vehicle_track['center'], crosswalk_poly)
+# speed = calculate_speed(vehicle_track, history_dict)
+# return is_inside and speed < 0.5
+
+# def check_amber_overspeed(vehicle_track, light_state, amber_start_time, stopline_poly, history_dict, speed_limit_px_per_sec):
+# if light_state != 'amber':
+# return False
+# if not point_in_polygon(vehicle_track['center'], stopline_poly):
+# return False
+# current_time = time.time()
+# speed = calculate_speed(vehicle_track, history_dict)
+# if current_time > amber_start_time and speed > speed_limit_px_per_sec:
+# return True
+# return False
+# # --- End Violation Rule Functions ---
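+#             # Sketch: the three check_* helpers above are defined but never called; the
+#             # loop further down re-implements the same checks inline. If they were wired
+#             # in, a per-vehicle call could look roughly like the lines below, where
+#             # 'crosswalk_poly', 'light_state' and 'history' stand in for values that are
+#             # only computed later in this loop (illustration only):
+#             #
+#             #     if check_stop_on_crosswalk(track, crosswalk_poly, light_state, history):
+#             #         violations.append({'track_id': track['id'],
+#             #                            'violation_type': 'stop_on_crosswalk',
+#             #                            'timestamp': datetime.now()})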
+
+# while self._running and cap.isOpened():
+# # Handle pause state
+# self.mutex.lock()
+# if self._paused:
+# print("[VideoController] Video paused, waiting...")
+# self.pause_condition.wait(self.mutex)
+# print("[VideoController] Video resumed")
+# self.mutex.unlock()
+
+# # Exit if we're no longer running (could have stopped while paused)
+# if not self._running:
+# break
+
+# try:
+# ret, frame = cap.read()
+# # Add critical frame debugging
+# print(f"🟡 Frame read attempt: ret={ret}, frame={None if frame is None else frame.shape}")
+
+# if not ret or frame is None:
+# frame_error_count += 1
+# print(f"⚠️ Frame read error ({frame_error_count}/{max_consecutive_errors})")
+
+# if frame_error_count >= max_consecutive_errors:
+# print("❌ Too many consecutive frame errors, stopping video thread")
+# break
+
+# # Skip this iteration and try again
+# time.sleep(0.1) # Wait a bit before trying again
+# continue
+
+# # Reset the error counter if we successfully got a frame
+# frame_error_count = 0
+
+# # Store the last frame for VLM analysis during pause
+# self._last_frame = frame.copy()
+# print(f"🟢 Last frame stored for VLM: {frame.shape}")
+
+# except Exception as e:
+# print(f"❌ Critical error reading frame: {e}")
+# frame_error_count += 1
+# if frame_error_count >= max_consecutive_errors:
+# print("❌ Too many errors, stopping video thread")
+# break
+# continue
+
+# # Detection and violation processing
+# process_start = time.time()
+
+# # Process detections
+# detection_start = time.time()
+# detections = []
+# if self.model_manager:
+# detections = self.model_manager.detect(frame)
+
+# # Normalize class names for consistency and check for traffic lights
+# traffic_light_indices = []
+# for i, det in enumerate(detections):
+# if 'class_name' in det:
+# original_name = det['class_name']
+# normalized_name = normalize_class_name(original_name)
+
+# # Keep track of traffic light indices
+# if normalized_name == 'traffic light' or original_name == 'traffic light':
+# traffic_light_indices.append(i)
+
+# if original_name != normalized_name:
+# print(f"📊 Normalized class name: '{original_name}' -> '{normalized_name}'")
+
+# det['class_name'] = normalized_name
+
+# # Ensure we have at least one traffic light for debugging
+#                 if not traffic_light_indices and self.source_type in ('file', 'video'):  # set_source() stores 'file' for video files
+# print("⚠️ No traffic lights detected, checking for objects that might be traffic lights...")
+
+# # Try lowering the confidence threshold specifically for traffic lights
+# # This is only for debugging purposes
+# if self.model_manager and hasattr(self.model_manager, 'detect'):
+# try:
+# low_conf_detections = self.model_manager.detect(frame, conf_threshold=0.2)
+# for det in low_conf_detections:
+# if 'class_name' in det and det['class_name'] == 'traffic light':
+# if det not in detections:
+# print(f"🚦 Found low confidence traffic light: {det['confidence']:.2f}")
+# detections.append(det)
+#                         except Exception:
+#                             # Best-effort debug pass only; ignore failures quietly
+#                             pass
+
+# detection_time = (time.time() - detection_start) * 1000
+
+# # Violation detection is disabled
+# violation_start = time.time()
+# violations = []
+# # if self.model_manager and detections:
+# # violations = self.model_manager.detect_violations(
+# # detections, frame, time.time()
+# # )
+# violation_time = (time.time() - violation_start) * 1000
+
+# # Update tracking if available
+# if self.model_manager:
+# detections = self.model_manager.update_tracking(detections, frame)
+# # If detections are returned as tuples, convert to dicts for downstream code
+# if detections and isinstance(detections[0], tuple):
+# # Convert (id, bbox, conf, class_id) to dict
+# detections = [
+# {'id': d[0], 'bbox': d[1], 'confidence': d[2], 'class_id': d[3]}
+# for d in detections
+# ]
+
+# # Calculate timing metrics
+# process_time = (time.time() - process_start) * 1000
+# self.processing_times.append(process_time)
+
+# # Update FPS
+# now = time.time()
+# self.frame_count += 1
+# elapsed = now - self.start_time
+# if elapsed > 0:
+# self.actual_fps = self.frame_count / elapsed
+
+# fps_smoothed = 1.0 / (now - prev_time) if now > prev_time else 0
+# prev_time = now
+# # Update metrics
+# self.performance_metrics = {
+# 'FPS': f"{fps_smoothed:.1f}",
+# 'Detection (ms)': f"{detection_time:.1f}",
+# 'Total (ms)': f"{process_time:.1f}"
+# }
+
+# # Store current frame data (thread-safe)
+# self.mutex.lock()
+# self.current_frame = frame.copy()
+# self.current_detections = detections
+# self.mutex.unlock()
+
+# # --- SCENE ANALYTICS PROCESSING ---
+# # Process detections through scene analytics if available
+# if self.scene_analytics:
+# try:
+# scene_analytics_data = self.scene_analytics.process_frame(frame, detections)
+# # Scene analytics automatically emit signals that we handle above
+# except Exception as e:
+# print(f"Error in scene analytics processing: {e}")
+
+# # Process frame with annotations before sending to UI
+# annotated_frame = frame.copy()
+
+# # --- VIOLATION DETECTION LOGIC (Run BEFORE drawing boxes) ---
+# # First get violation information so we can color boxes appropriately
+# violating_vehicle_ids = set() # Track which vehicles are violating
+# violations = []
+
+# # Initialize traffic light variables
+# traffic_lights = []
+# has_traffic_lights = False
+
+# # Handle multiple traffic lights with consensus approach
+# traffic_light_count = 0
+# for det in detections:
+# if is_traffic_light(det.get('class_name')):
+# has_traffic_lights = True
+# traffic_light_count += 1
+# if 'traffic_light_color' in det:
+# light_info = det['traffic_light_color']
+# traffic_lights.append({'bbox': det['bbox'], 'color': light_info.get('color', 'unknown'), 'confidence': light_info.get('confidence', 0.0)})
+
+# print(f"[TRAFFIC LIGHT] Detected {traffic_light_count} traffic light(s), has_traffic_lights={has_traffic_lights}")
+# if has_traffic_lights:
+# print(f"[TRAFFIC LIGHT] Traffic light colors: {[tl.get('color', 'unknown') for tl in traffic_lights]}")
+
+# # Get traffic light position for crosswalk detection
+# traffic_light_position = None
+# if has_traffic_lights:
+# for det in detections:
+# if is_traffic_light(det.get('class_name')) and 'bbox' in det:
+# traffic_light_bbox = det['bbox']
+# # Extract center point from bbox for crosswalk utils
+# x1, y1, x2, y2 = traffic_light_bbox
+# traffic_light_position = ((x1 + x2) // 2, (y1 + y2) // 2)
+# break
+
+# # Run crosswalk detection ONLY if traffic light is detected
+# crosswalk_bbox, violation_line_y, debug_info = None, None, {}
+# if has_traffic_lights and traffic_light_position is not None:
+# try:
+# print(f"[CROSSWALK] Traffic light detected at {traffic_light_position}, running crosswalk detection")
+# # Use new crosswalk_utils2 logic only when traffic light exists
+# annotated_frame, crosswalk_bbox, violation_line_y, debug_info = detect_crosswalk_and_violation_line(
+# annotated_frame,
+# traffic_light_position=traffic_light_position
+# )
+# print(f"[CROSSWALK] Detection result: crosswalk_bbox={crosswalk_bbox is not None}, violation_line_y={violation_line_y}")
+# # --- Draw crosswalk region if detected and close to traffic light ---
+# # (REMOVED: Do not draw crosswalk box or label)
+# # if crosswalk_bbox is not None:
+# # x, y, w, h = map(int, crosswalk_bbox)
+# # tl_x, tl_y = traffic_light_position
+# # crosswalk_center_y = y + h // 2
+# # distance = abs(crosswalk_center_y - tl_y)
+# # print(f"[CROSSWALK DEBUG] Crosswalk bbox: {crosswalk_bbox}, Traffic light: {traffic_light_position}, vertical distance: {distance}")
+# # if distance < 120:
+# # cv2.rectangle(annotated_frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
+# # cv2.putText(annotated_frame, "Crosswalk", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
+# # # Top and bottom edge of crosswalk
+# # top_edge = y
+# # bottom_edge = y + h
+# # if abs(tl_y - top_edge) < abs(tl_y - bottom_edge):
+# # crosswalk_edge_y = top_edge
+# # else:
+# # crosswalk_edge_y = bottom_edge
+# if crosswalk_bbox is not None:
+# x, y, w, h = map(int, crosswalk_bbox)
+# tl_x, tl_y = traffic_light_position
+# crosswalk_center_y = y + h // 2
+# distance = abs(crosswalk_center_y - tl_y)
+# print(f"[CROSSWALK DEBUG] Crosswalk bbox: {crosswalk_bbox}, Traffic light: {traffic_light_position}, vertical distance: {distance}")
+# # Top and bottom edge of crosswalk
+# top_edge = y
+# bottom_edge = y + h
+# if abs(tl_y - top_edge) < abs(tl_y - bottom_edge):
+# crosswalk_edge_y = top_edge
+# else:
+# crosswalk_edge_y = bottom_edge
+# except Exception as e:
+# print(f"[ERROR] Crosswalk detection failed: {e}")
+# crosswalk_bbox, violation_line_y, debug_info = None, None, {}
+# else:
+# print(f"[CROSSWALK] No traffic light detected (has_traffic_lights={has_traffic_lights}), skipping crosswalk detection")
+# # NO crosswalk detection without traffic light
+# violation_line_y = None
+
+# # Check if crosswalk is detected
+# crosswalk_detected = crosswalk_bbox is not None
+# stop_line_detected = debug_info.get('stop_line') is not None
+
+# # ALWAYS process vehicle tracking (moved outside violation logic)
+# tracked_vehicles = []
+# if hasattr(self, 'vehicle_tracker') and self.vehicle_tracker is not None:
+# try:
+# # Filter vehicle detections
+# vehicle_classes = ['car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle']
+# vehicle_dets = []
+# h, w = frame.shape[:2]
+
+# print(f"[TRACK DEBUG] Processing {len(detections)} total detections")
+
+# for det in detections:
+# if (det.get('class_name') in vehicle_classes and
+# 'bbox' in det and
+# det.get('confidence', 0) > self.min_confidence_threshold):
+
+# # Check bbox dimensions
+# bbox = det['bbox']
+# x1, y1, x2, y2 = bbox
+# box_w, box_h = x2-x1, y2-y1
+# box_area = box_w * box_h
+# area_ratio = box_area / (w * h)
+
+# print(f"[TRACK DEBUG] Vehicle {det.get('class_name')} conf={det.get('confidence'):.2f}, area_ratio={area_ratio:.4f}")
+
+# if 0.001 <= area_ratio <= 0.25:
+# vehicle_dets.append(det)
+# print(f"[TRACK DEBUG] Added vehicle: {det.get('class_name')} conf={det.get('confidence'):.2f}")
+# else:
+# print(f"[TRACK DEBUG] Rejected vehicle: area_ratio={area_ratio:.4f} not in range [0.001, 0.25]")
+
+# print(f"[TRACK DEBUG] Filtered to {len(vehicle_dets)} vehicle detections")
+
+# # Update tracker
+# if len(vehicle_dets) > 0:
+# print(f"[TRACK DEBUG] Updating tracker with {len(vehicle_dets)} vehicles...")
+# tracks = self.vehicle_tracker.update(vehicle_dets, frame)
+# # Filter out tracks without bbox to avoid warnings
+# valid_tracks = []
+# for track in tracks:
+# bbox = None
+# if isinstance(track, dict):
+# bbox = track.get('bbox', None)
+# else:
+# bbox = getattr(track, 'bbox', None)
+# if bbox is not None:
+# valid_tracks.append(track)
+# else:
+# print(f"Warning: Track has no bbox, skipping: {track}")
+# tracks = valid_tracks
+# print(f"[TRACK DEBUG] Tracker returned {len(tracks)} tracks (after bbox filter)")
+# else:
+# print(f"[TRACK DEBUG] No vehicles to track, skipping tracker update")
+# tracks = []
+
+# # Process each tracked vehicle
+# tracked_vehicles = []
+# track_ids_seen = []
+
+# for track in tracks:
+# track_id = track['id']
+# bbox = track['bbox']
+# x1, y1, x2, y2 = map(float, bbox)
+# center_y = (y1 + y2) / 2
+
+# # Check for duplicate IDs
+# if track_id in track_ids_seen:
+# print(f"[TRACK ERROR] Duplicate ID detected: {track_id}")
+# track_ids_seen.append(track_id)
+
+# print(f"[TRACK DEBUG] Processing track ID={track_id} bbox={bbox}")
+
+# # Initialize or update vehicle history
+# if track_id not in self.vehicle_history:
+# from collections import deque
+# self.vehicle_history[track_id] = deque(maxlen=self.position_history_size)
+
+# # Initialize vehicle status if not exists
+# if track_id not in self.vehicle_statuses:
+# self.vehicle_statuses[track_id] = {
+# 'recent_movement': [],
+# 'violation_history': [],
+# 'crossed_during_red': False,
+# 'last_position': None, # Track last position for jump detection
+# 'suspicious_jumps': 0 # Count suspicious position jumps
+# }
+
+# # Detect suspicious position jumps (potential ID switches)
+# if self.vehicle_statuses[track_id]['last_position'] is not None:
+# last_y = self.vehicle_statuses[track_id]['last_position']
+# center_y = (y1 + y2) / 2
+# position_jump = abs(center_y - last_y)
+
+# if position_jump > self.max_position_jump:
+# self.vehicle_statuses[track_id]['suspicious_jumps'] += 1
+# print(f"[TRACK WARNING] Vehicle ID={track_id} suspicious position jump: {last_y:.1f} -> {center_y:.1f} (jump={position_jump:.1f})")
+
+# # If too many suspicious jumps, reset violation status to be safe
+# if self.vehicle_statuses[track_id]['suspicious_jumps'] > 2:
+# print(f"[TRACK RESET] Vehicle ID={track_id} has too many suspicious jumps, resetting violation status")
+# self.vehicle_statuses[track_id]['crossed_during_red'] = False
+# self.vehicle_statuses[track_id]['suspicious_jumps'] = 0
+
+# # Update position history and last position
+# self.vehicle_history[track_id].append(center_y)
+# self.vehicle_statuses[track_id]['last_position'] = center_y
+
+# # BALANCED movement detection - detect clear movement while avoiding false positives
+# is_moving = False
+# movement_detected = False
+
+# if len(self.vehicle_history[track_id]) >= 3: # Require at least 3 frames for movement detection
+# recent_positions = list(self.vehicle_history[track_id])
+
+# # Check movement over 3 frames for quick response
+# if len(recent_positions) >= 3:
+# movement_3frames = abs(recent_positions[-1] - recent_positions[-3])
+# if movement_3frames > self.movement_threshold: # More responsive threshold
+# movement_detected = True
+# print(f"[MOVEMENT] Vehicle ID={track_id} MOVING: 3-frame movement = {movement_3frames:.1f}")
+
+# # Confirm with longer movement for stability (if available)
+# if len(recent_positions) >= 5:
+# movement_5frames = abs(recent_positions[-1] - recent_positions[-5])
+# if movement_5frames > self.movement_threshold * 1.5: # Moderate threshold for 5 frames
+# movement_detected = True
+# print(f"[MOVEMENT] Vehicle ID={track_id} MOVING: 5-frame movement = {movement_5frames:.1f}")
+
+# # Store historical movement for smoothing - require consistent movement
+# self.vehicle_statuses[track_id]['recent_movement'].append(movement_detected)
+# if len(self.vehicle_statuses[track_id]['recent_movement']) > 4: # Shorter history for quicker response
+# self.vehicle_statuses[track_id]['recent_movement'].pop(0)
+
+# # BALANCED: Require majority of recent frames to show movement (2 out of 4)
+# recent_movement_count = sum(self.vehicle_statuses[track_id]['recent_movement'])
+# total_recent_frames = len(self.vehicle_statuses[track_id]['recent_movement'])
+# if total_recent_frames >= 2 and recent_movement_count >= (total_recent_frames * 0.5): # 50% of frames must show movement
+# is_moving = True
+
+# print(f"[TRACK DEBUG] Vehicle ID={track_id} is_moving={is_moving} (threshold={self.movement_threshold})")
+
+# # Initialize as not violating
+# is_violation = False
+
+# tracked_vehicles.append({
+# 'id': track_id,
+# 'bbox': bbox,
+# 'center_y': center_y,
+# 'is_moving': is_moving,
+# 'is_violation': is_violation
+# })
+
+# print(f"[DEBUG] ByteTrack tracked {len(tracked_vehicles)} vehicles")
+# for i, tracked in enumerate(tracked_vehicles):
+# print(f" Vehicle {i}: ID={tracked['id']}, center_y={tracked['center_y']:.1f}, moving={tracked['is_moving']}, violating={tracked['is_violation']}")
+
+# # DEBUG: Print all tracked vehicle IDs and their bboxes for this frame
+# if tracked_vehicles:
+# print(f"[DEBUG] All tracked vehicles this frame:")
+# for v in tracked_vehicles:
+# print(f" ID={v['id']} bbox={v['bbox']} center_y={v.get('center_y', 'NA')}")
+# else:
+# print("[DEBUG] No tracked vehicles this frame!")
+
+# # Clean up old vehicle data
+# current_track_ids = [tracked['id'] for tracked in tracked_vehicles]
+# self._cleanup_old_vehicle_data(current_track_ids)
+
+# except Exception as e:
+# print(f"[ERROR] Vehicle tracking failed: {e}")
+# import traceback
+# traceback.print_exc()
+# else:
+# print("[WARN] ByteTrack vehicle tracker not available!")
+
+# # Process violations - CHECK VEHICLES THAT CROSS THE LINE OVER A WINDOW OF FRAMES
+# # IMPORTANT: Only process violations if BOTH traffic light is detected AND crosswalk is detected AND red light AND violation line exists
+
+# # Handle case when no traffic light is detected in video
+# if not has_traffic_lights:
+# print("[INFO] No traffic light detected in video - violation detection disabled")
+# # Emit status to UI (only once per session to avoid spam)
+# if not hasattr(self, '_no_traffic_light_notified'):
+# self.stats_ready.emit({
+# 'status': 'monitoring_only',
+# 'message': 'No traffic light detected - monitoring vehicles only',
+# 'violation_detection_active': False,
+# 'timestamp': time.time()
+# })
+# self._no_traffic_light_notified = True
+# else:
+# # Check if traffic light is red (only when traffic light exists)
+# is_red_light = self.latest_traffic_light and self.latest_traffic_light.get('color') == 'red'
+
+# # New condition: ALL of these must be true for violation line processing:
+# # 1. Traffic lights detected (has_traffic_lights)
+# # 2. Crosswalk detected (crosswalk_detected)
+# # 3. Red light is currently active (is_red_light)
+# # 4. Violation line exists (violation_line_y is not None)
+# # 5. Vehicles are being tracked (tracked_vehicles)
+# if (has_traffic_lights and crosswalk_detected and is_red_light and
+# violation_line_y is not None and tracked_vehicles):
+# print(f"[VIOLATION DEBUG] ALL CONDITIONS MET - Traffic light: {has_traffic_lights}, Crosswalk: {crosswalk_detected}, Red light: {is_red_light}, Line Y: {violation_line_y}, Vehicles: {len(tracked_vehicles)}")
+
+# # Check each tracked vehicle for violations
+# for tracked in tracked_vehicles:
+# track_id = tracked['id']
+# center_y = tracked['center_y']
+# is_moving = tracked['is_moving']
+
+# # Get position history for this vehicle
+# position_history = list(self.vehicle_history[track_id])
+
+# # Enhanced crossing detection: check over a window of frames
+# line_crossed_in_window = False
+# crossing_details = None
+# if len(position_history) >= 2:
+# window_size = min(self.crossing_check_window, len(position_history))
+# for i in range(1, window_size):
+# prev_y = position_history[-(i+1)] # Earlier position
+# curr_y = position_history[-i] # Later position
+# # Check if vehicle crossed the line in this frame pair
+# if prev_y < violation_line_y and curr_y >= violation_line_y:
+# line_crossed_in_window = True
+# crossing_details = {
+# 'frames_ago': i,
+# 'prev_y': prev_y,
+# 'curr_y': curr_y,
+# 'window_checked': window_size
+# }
+# print(f"[VIOLATION DEBUG] Vehicle ID={track_id} crossed line {i} frames ago: {prev_y:.1f} -> {curr_y:.1f}")
+# break
+
+# print(f"[VIOLATION DEBUG] Vehicle ID={track_id}: latest_traffic_light={self.latest_traffic_light}")
+#                             print(f"[VIOLATION DEBUG] Vehicle ID={track_id}: position_history={[f'{p:.1f}' for p in position_history[-5:]]}")  # Show last 5 positions
+# print(f"[VIOLATION DEBUG] Vehicle ID={track_id}: line_crossed_in_window={line_crossed_in_window}, crossing_details={crossing_details}")
+
+# # Enhanced violation detection: vehicle crossed the line while moving (red light already verified above)
+# actively_crossing = (line_crossed_in_window and is_moving)
+
+# # Initialize violation status for new vehicles
+# if 'crossed_during_red' not in self.vehicle_statuses[track_id]:
+# self.vehicle_statuses[track_id]['crossed_during_red'] = False
+
+# # Mark vehicle as having crossed during red if it actively crosses
+# if actively_crossing:
+# # Additional validation: ensure it's not a false positive from ID switch
+# suspicious_jumps = self.vehicle_statuses[track_id].get('suspicious_jumps', 0)
+# if suspicious_jumps <= 1: # Allow crossing if not too many suspicious jumps
+# self.vehicle_statuses[track_id]['crossed_during_red'] = True
+# print(f"[VIOLATION ALERT] Vehicle ID={track_id} CROSSED line during red light!")
+# print(f" -> Crossing details: {crossing_details}")
+# else:
+# print(f"[VIOLATION IGNORED] Vehicle ID={track_id} crossing ignored due to {suspicious_jumps} suspicious jumps")
+
+# # IMPORTANT: Reset violation status when light turns green (regardless of position)
+# if not is_red_light:
+# if self.vehicle_statuses[track_id]['crossed_during_red']:
+# print(f"[VIOLATION RESET] Vehicle ID={track_id} violation status reset (light turned green)")
+# self.vehicle_statuses[track_id]['crossed_during_red'] = False
+
+# # Vehicle is violating ONLY if it crossed during red and light is still red
+# is_violation = (self.vehicle_statuses[track_id]['crossed_during_red'] and is_red_light)
+
+# # Track current violation state for analytics - only actual crossings
+# self.vehicle_statuses[track_id]['violation_history'].append(actively_crossing)
+# if len(self.vehicle_statuses[track_id]['violation_history']) > 5:
+# self.vehicle_statuses[track_id]['violation_history'].pop(0)
+
+# print(f"[VIOLATION DEBUG] Vehicle ID={track_id}: center_y={center_y:.1f}, line={violation_line_y}")
+# print(f" history_window={[f'{p:.1f}' for p in position_history[-self.crossing_check_window:]]}")
+# print(f" moving={is_moving}, red_light={is_red_light}")
+# print(f" actively_crossing={actively_crossing}, crossed_during_red={self.vehicle_statuses[track_id]['crossed_during_red']}")
+# print(f" suspicious_jumps={self.vehicle_statuses[track_id].get('suspicious_jumps', 0)}")
+# print(f" FINAL_VIOLATION={is_violation}")
+
+# # Update violation status
+# tracked['is_violation'] = is_violation
+
+# if actively_crossing and self.vehicle_statuses[track_id].get('suspicious_jumps', 0) <= 1: # Only add if not too many suspicious jumps
+# # Add to violating vehicles set
+# violating_vehicle_ids.add(track_id)
+
+# # Add to violations list
+# timestamp = datetime.now() # Keep as datetime object, not string
+# violations.append({
+# 'track_id': track_id,
+# 'id': track_id,
+# 'bbox': [int(tracked['bbox'][0]), int(tracked['bbox'][1]), int(tracked['bbox'][2]), int(tracked['bbox'][3])],
+# 'violation': 'line_crossing',
+# 'violation_type': 'line_crossing', # Add this for analytics compatibility
+# 'timestamp': timestamp,
+# 'line_position': violation_line_y,
+# 'movement': crossing_details if crossing_details else {'prev_y': center_y, 'current_y': center_y},
+# 'crossing_window': self.crossing_check_window,
+# 'position_history': list(position_history[-10:]) # Include recent history for debugging
+# })
+
+# print(f"[DEBUG] 🚨 VIOLATION DETECTED: Vehicle ID={track_id} CROSSED VIOLATION LINE")
+# print(f" Enhanced detection: {crossing_details}")
+# print(f" Position history: {[f'{p:.1f}' for p in position_history[-10:]]}")
+# print(f" Detection window: {self.crossing_check_window} frames")
+# print(f" while RED LIGHT & MOVING")
+
+# else:
+# # Log why violation detection was skipped
+# reasons = []
+# if not crosswalk_detected:
+# reasons.append("No crosswalk detected")
+# if not is_red_light:
+# reasons.append(f"Light not red (current: {self.latest_traffic_light.get('color') if self.latest_traffic_light else 'None'})")
+# if violation_line_y is None:
+# reasons.append("No violation line")
+# if not tracked_vehicles:
+# reasons.append("No vehicles tracked")
+
+# if reasons:
+# print(f"[INFO] Violation detection skipped: {', '.join(reasons)}")
+
+# # --- ENHANCED VIOLATION DETECTION: Add new real-world scenarios ---
+# # 1. Pedestrian right-of-way violation (blocking crosswalk during green)
+# # 2. Improper stopping over crosswalk at red
+# # 3. Accelerating through yellow/amber light
+# pedestrian_dets = [det for det in detections if det.get('class_name') == 'person' and 'bbox' in det]
+# pedestrian_tracks = []
+# for ped in pedestrian_dets:
+# x1, y1, x2, y2 = ped['bbox']
+# center = ((x1 + x2) // 2, (y1 + y2) // 2)
+# pedestrian_tracks.append({'bbox': ped['bbox'], 'center': center})
+
+# # Prepare crosswalk polygon for point-in-polygon checks
+# crosswalk_poly = None
+# if crosswalk_bbox is not None:
+# x, y, w, h = crosswalk_bbox
+# crosswalk_poly = (x, y, w, h)
+# stopline_poly = crosswalk_poly # For simplicity, use crosswalk as stopline
+
+# # Track amber/yellow light start time
+# amber_start_time = getattr(self, 'amber_start_time', None)
+# latest_light_color = self.latest_traffic_light.get('color') if isinstance(self.latest_traffic_light, dict) else self.latest_traffic_light
+# if latest_light_color == 'yellow' and amber_start_time is None:
+# amber_start_time = time.time()
+# self.amber_start_time = amber_start_time
+# elif latest_light_color != 'yellow':
+# self.amber_start_time = None
+
+#                 # Vehicle position history for speed calculation. Persist it on the
+#                 # controller so the multi-frame speed/acceleration checks below can
+#                 # accumulate the 2-3 samples they need (a dict rebuilt every frame
+#                 # would always hold a single sample per track).
+#                 if not hasattr(self, '_vehicle_position_history'):
+#                     self._vehicle_position_history = {}
+#                 vehicle_position_history = self._vehicle_position_history
+# for track in tracked_vehicles:
+# track_id = track['id']
+# bbox = track['bbox']
+# x1, y1, x2, y2 = bbox
+# center = ((x1 + x2) // 2, (y1 + y2) // 2)
+#                     # Store (center, timestamp) and keep only a short tail per track
+#                     if track_id not in vehicle_position_history:
+#                         vehicle_position_history[track_id] = []
+#                     vehicle_position_history[track_id].append((center, time.time()))
+#                     vehicle_position_history[track_id] = vehicle_position_history[track_id][-10:]
+# track['center'] = center
+
+# # --- 1. Pedestrian right-of-way violation ---
+# if crosswalk_poly and latest_light_color == 'green' and pedestrian_tracks:
+# for track in tracked_vehicles:
+# # Only consider moving vehicles - stopped vehicles aren't likely to be violating
+# if track.get('is_moving', False) and point_in_polygon(track['center'], crosswalk_poly):
+# # Check for pedestrians in the crosswalk
+# for ped in pedestrian_tracks:
+# if point_in_polygon(ped['center'], crosswalk_poly):
+# # Vehicle is blocking crosswalk during green with pedestrian present
+# violations.append({
+# 'track_id': track['id'],
+# 'id': track['id'],
+# 'bbox': [int(track['bbox'][0]), int(track['bbox'][1]), int(track['bbox'][2]), int(track['bbox'][3])],
+# 'violation': 'pedestrian_right_of_way',
+# 'violation_type': 'pedestrian_right_of_way',
+# 'timestamp': datetime.now(),
+# 'details': {
+# 'pedestrian_bbox': ped['bbox'],
+# 'crosswalk_bbox': crosswalk_bbox,
+# 'is_moving': track.get('is_moving', False),
+# 'traffic_light': latest_light_color
+# }
+# })
+# print(f"[VIOLATION] Pedestrian right-of-way violation: Vehicle ID={track['id']} blocking crosswalk during green light with pedestrian present")
+
+# # --- 2. Improper stopping over crosswalk at red ---
+# if crosswalk_poly and latest_light_color == 'red':
+# for track in tracked_vehicles:
+# # Check if vehicle is not moving (to confirm it's stopped)
+# is_stopped = not track.get('is_moving', True)
+
+# if is_stopped and point_in_polygon(track['center'], crosswalk_poly):
+# # Calculate overlap ratio between vehicle and crosswalk
+# vx1, vy1, vx2, vy2 = track['bbox']
+# cx, cy, cw, ch = crosswalk_poly
+# overlap_x1 = max(vx1, cx)
+# overlap_y1 = max(vy1, cy)
+# overlap_x2 = min(vx2, cx + cw)
+# overlap_y2 = min(vy2, cy + ch)
+# overlap_area = max(0, overlap_x2 - overlap_x1) * max(0, overlap_y2 - overlap_y1)
+# vehicle_area = (vx2 - vx1) * (vy2 - vy1)
+# overlap_ratio = overlap_area / max(vehicle_area, 1)
+
+# # Double-verify that vehicle is stopped by checking explicit speed
+# speed = 0.0
+# hist = vehicle_position_history.get(track['id'], [])
+# if len(hist) >= 2:
+# (c1, t1), (c2, t2) = hist[-2], hist[-1]
+# dist = ((c2[0]-c1[0])**2 + (c2[1]-c1[1])**2)**0.5
+# dt = max(t2-t1, 1e-3)
+# speed = dist / dt
+
+# # Vehicle must have significant overlap with crosswalk (>25%) and be stopped
+# if overlap_ratio > 0.25 and speed < 0.8:
+# violations.append({
+# 'track_id': track['id'],
+# 'id': track['id'],
+# 'bbox': [int(track['bbox'][0]), int(track['bbox'][1]), int(track['bbox'][2]), int(track['bbox'][3])],
+# 'violation': 'stop_on_crosswalk',
+# 'violation_type': 'stop_on_crosswalk',
+# 'timestamp': datetime.now(),
+# 'details': {
+# 'overlap_ratio': overlap_ratio,
+# 'speed': speed,
+# 'crosswalk_bbox': crosswalk_bbox,
+# 'traffic_light': latest_light_color,
+# 'is_moving_flag': track.get('is_moving', None)
+# }
+# })
+# print(f"[VIOLATION] Improper stop on crosswalk: Vehicle ID={track['id']} stopped on crosswalk during red light (overlap={overlap_ratio:.2f}, speed={speed:.2f})")
+
+# # --- 3. Accelerating through yellow/amber light ---
+# if stopline_poly and latest_light_color == 'yellow' and amber_start_time:
+# # Calculate time since light turned yellow
+# current_time = time.time()
+# time_since_yellow = current_time - amber_start_time
+
+# # Speed threshold (in pixels per second) - can be adjusted based on testing
+# speed_limit_px_per_sec = 8.0
+
+# # Check each vehicle approaching the intersection
+# for track in tracked_vehicles:
+# # Check if vehicle is near the stop line/intersection
+# if point_in_polygon(track['center'], stopline_poly) or (
+# track['center'][1] < stopline_poly[1] + stopline_poly[3] + 50 and
+# track['center'][1] > stopline_poly[1] - 50
+# ):
+# # If the vehicle is moving (confirmed via tracker)
+# if track.get('is_moving', False):
+# # Calculate acceleration by looking at recent speed changes
+# hist = vehicle_position_history.get(track['id'], [])
+# if len(hist) >= 3:
+# # Calculate speeds at different time points
+# (c1, t1), (c2, t2), (c3, t3) = hist[-3], hist[-2], hist[-1]
+
+# # Speed at earlier point
+# v1 = ((c2[0]-c1[0])**2 + (c2[1]-c1[1])**2)**0.5 / max(t2-t1, 1e-3)
+
+# # Speed at later point
+# v2 = ((c3[0]-c2[0])**2 + (c3[1]-c2[1])**2)**0.5 / max(t3-t2, 1e-3)
+
+# # Acceleration violation if:
+# # 1. Speed increases significantly (>20%)
+# # 2. Final speed exceeds threshold
+# # 3. Yellow light is less than 3 seconds old (typical acceleration window)
+# if v2 > v1 * 1.2 and v2 > speed_limit_px_per_sec and time_since_yellow < 3.0:
+# violations.append({
+# 'track_id': track['id'],
+# 'id': track['id'],
+# 'bbox': [int(track['bbox'][0]), int(track['bbox'][1]), int(track['bbox'][2]), int(track['bbox'][3])],
+# 'violation': 'amber_acceleration',
+# 'violation_type': 'amber_acceleration',
+# 'timestamp': datetime.now(),
+# 'details': {
+# 'speed_before': v1,
+# 'speed_after': v2,
+# 'acceleration': (v2-v1)/max(t3-t2, 1e-3),
+# 'time_since_yellow': time_since_yellow,
+# 'traffic_light': latest_light_color
+# }
+# })
+# print(f"[VIOLATION] Amber light acceleration: Vehicle ID={track['id']} accelerated from {v1:.2f} to {v2:.2f} px/sec {time_since_yellow:.1f}s after yellow light")
+
+# # Emit progress signal after processing each frame
+# if hasattr(self, 'progress_ready'):
+# self.progress_ready.emit(int(cap.get(cv2.CAP_PROP_POS_FRAMES)), int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), time.time())
+
+# # Draw detections with bounding boxes - NOW with violation info
+# # Only show traffic light and vehicle classes
+# allowed_classes = ['traffic light', 'car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle']
+# filtered_detections = [det for det in detections if det.get('class_name') in allowed_classes]
+# print(f"Drawing {len(filtered_detections)} detection boxes on frame (filtered)")
+# # Statistics for debugging
+# vehicles_with_ids = 0
+# vehicles_without_ids = 0
+# vehicles_moving = 0
+# vehicles_violating = 0
+
+# if detections and len(detections) > 0:
+#                     # Reuse filtered_detections and the debug counters computed just above
+# for det in filtered_detections:
+# if 'bbox' in det:
+# bbox = det['bbox']
+# x1, y1, x2, y2 = map(int, bbox)
+# label = det.get('class_name', 'object')
+# confidence = det.get('confidence', 0.0)
+
+# # Robustness: ensure label and confidence are not None
+# if label is None:
+# label = 'object'
+# if confidence is None:
+# confidence = 0.0
+# class_id = det.get('class_id', -1)
+
+# # Check if this detection corresponds to a violating or moving vehicle
+# det_center_x = (x1 + x2) / 2
+# det_center_y = (y1 + y2) / 2
+# is_violating_vehicle = False
+# is_moving_vehicle = False
+# vehicle_id = None
+
+# # Match detection with tracked vehicles - IMPROVED MATCHING
+# if label in ['car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle'] and len(tracked_vehicles) > 0:
+# print(f"[MATCH DEBUG] Attempting to match {label} detection at ({det_center_x:.1f}, {det_center_y:.1f}) with {len(tracked_vehicles)} tracked vehicles")
+# best_match = None
+# best_distance = float('inf')
+# best_iou = 0.0
+
+# for i, tracked in enumerate(tracked_vehicles):
+# track_bbox = tracked['bbox']
+# track_x1, track_y1, track_x2, track_y2 = map(float, track_bbox)
+
+# # Calculate center distance
+# track_center_x = (track_x1 + track_x2) / 2
+# track_center_y = (track_y1 + track_y2) / 2
+# center_distance = ((det_center_x - track_center_x)**2 + (det_center_y - track_center_y)**2)**0.5
+
+# # Calculate IoU (Intersection over Union)
+# intersection_x1 = max(x1, track_x1)
+# intersection_y1 = max(y1, track_y1)
+# intersection_x2 = min(x2, track_x2)
+# intersection_y2 = min(y2, track_y2)
+
+# if intersection_x2 > intersection_x1 and intersection_y2 > intersection_y1:
+# intersection_area = (intersection_x2 - intersection_x1) * (intersection_y2 - intersection_y1)
+# det_area = (x2 - x1) * (y2 - y1)
+# track_area = (track_x2 - track_x1) * (track_y2 - track_y1)
+# union_area = det_area + track_area - intersection_area
+# iou = intersection_area / union_area if union_area > 0 else 0
+# else:
+# iou = 0
+
+# print(f"[MATCH DEBUG] Track {i}: ID={tracked['id']}, center=({track_center_x:.1f}, {track_center_y:.1f}), distance={center_distance:.1f}, IoU={iou:.3f}")
+
+# # Use stricter matching criteria - prioritize IoU over distance
+# # Good match if: high IoU OR close center distance with some overlap
+# is_good_match = (iou > 0.3) or (center_distance < 60 and iou > 0.1)
+
+# if is_good_match:
+# print(f"[MATCH DEBUG] Track {i} is a good match (IoU={iou:.3f}, distance={center_distance:.1f})")
+#                                         # Prefer higher IoU, then lower center distance
+# if iou > best_iou or (iou == best_iou and center_distance < best_distance):
+# best_distance = center_distance
+# best_iou = iou
+# best_match = tracked
+# else:
+# print(f"[MATCH DEBUG] Track {i} failed matching criteria (IoU={iou:.3f}, distance={center_distance:.1f})")
+
+# if best_match:
+# vehicle_id = best_match['id']
+# is_moving_vehicle = best_match.get('is_moving', False)
+# is_violating_vehicle = best_match.get('is_violation', False)
+# print(f"[MATCH SUCCESS] Detection at ({det_center_x:.1f},{det_center_y:.1f}) matched with track ID={vehicle_id}")
+# print(f" -> STATUS: moving={is_moving_vehicle}, violating={is_violating_vehicle}, IoU={best_iou:.3f}, distance={best_distance:.1f}")
+# else:
+# print(f"[MATCH FAILED] No suitable match found for {label} detection at ({det_center_x:.1f}, {det_center_y:.1f})")
+# print(f" -> Will draw as untracked detection with default color")
+# else:
+# if label not in ['car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle']:
+# print(f"[MATCH DEBUG] Skipping matching for non-vehicle label: {label}")
+# elif len(tracked_vehicles) == 0:
+# print(f"[MATCH DEBUG] No tracked vehicles available for matching")
+# else:
+# try:
+# if len(tracked_vehicles) > 0:
+# distances = [((det_center_x - (t['bbox'][0] + t['bbox'][2])/2)**2 + (det_center_y - (t['bbox'][1] + t['bbox'][3])/2)**2)**0.5 for t in tracked_vehicles[:3]]
+# print(f"[DEBUG] No match found for detection at ({det_center_x:.1f},{det_center_y:.1f}) - distances: {distances}")
+# else:
+# print(f"[DEBUG] No tracked vehicles available to match detection at ({det_center_x:.1f},{det_center_y:.1f})")
+# except NameError:
+# print(f"[DEBUG] No match found for detection (coords unavailable)")
+# if len(tracked_vehicles) > 0:
+# print(f"[DEBUG] Had {len(tracked_vehicles)} tracked vehicles available")
+
+# # Choose box color based on vehicle status
+# # PRIORITY: 1. Violating (RED) - crossed during red light 2. Moving (ORANGE) 3. Stopped (GREEN)
+# if is_violating_vehicle and vehicle_id is not None:
+# box_color = (0, 0, 255) # RED for violating vehicles (crossed line during red)
+# label_text = f"{label}:ID{vehicle_id}⚠️"
+# thickness = 4
+# vehicles_violating += 1
+# print(f"[COLOR DEBUG] Drawing RED box for VIOLATING vehicle ID={vehicle_id} (crossed during red)")
+# elif is_moving_vehicle and vehicle_id is not None and not is_violating_vehicle:
+# box_color = (0, 165, 255) # ORANGE for moving vehicles (not violating)
+# label_text = f"{label}:ID{vehicle_id}"
+# thickness = 3
+# vehicles_moving += 1
+# print(f"[COLOR DEBUG] Drawing ORANGE box for MOVING vehicle ID={vehicle_id} (not violating)")
+# elif label in ['car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle'] and vehicle_id is not None:
+# box_color = (0, 255, 0) # Green for stopped vehicles
+# label_text = f"{label}:ID{vehicle_id}"
+# thickness = 2
+# print(f"[COLOR DEBUG] Drawing GREEN box for STOPPED vehicle ID={vehicle_id}")
+# elif is_traffic_light(label):
+# box_color = (0, 0, 255) # Red for traffic lights
+# label_text = f"{label}"
+# thickness = 2
+# else:
+# box_color = (0, 255, 0) # Default green for other objects
+# label_text = f"{label}"
+# thickness = 2
+
+# # Update statistics
+# if label in ['car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle']:
+# if vehicle_id is not None:
+# vehicles_with_ids += 1
+# else:
+# vehicles_without_ids += 1
+
+# # Draw rectangle and label
+# cv2.rectangle(annotated_frame, (x1, y1), (x2, y2), box_color, thickness)
+# cv2.putText(annotated_frame, label_text, (x1, y1-10),
+# cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 2)
+# # id_text = f"ID: {det['id']}"
+# # # Calculate text size for background
+# # (tw, th), baseline = cv2.getTextSize(id_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)
+# # # Draw filled rectangle for background (top-left of bbox)
+# # cv2.rectangle(annotated_frame, (x1, y1 - th - 8), (x1 + tw + 4, y1), (0, 0, 0), -1)
+# # # Draw the ID text in bold yellow
+# # cv2.putText(annotated_frame, id_text, (x1 + 2, y1 - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2, cv2.LINE_AA)
+# # print(f"[DEBUG] Detection ID: {det['id']} BBOX: {bbox} CLASS: {label} CONF: {confidence:.2f}")
+
+# if class_id == 9 or is_traffic_light(label):
+# try:
+# light_info = detect_traffic_light_color(annotated_frame, [x1, y1, x2, y2])
+# if light_info.get("color", "unknown") == "unknown":
+# light_info = ensure_traffic_light_color(annotated_frame, [x1, y1, x2, y2])
+# det['traffic_light_color'] = light_info
+# # Draw enhanced traffic light status
+# annotated_frame = draw_traffic_light_status(annotated_frame, bbox, light_info)
+
+# # --- Update latest_traffic_light for UI/console ---
+# self.latest_traffic_light = light_info
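+#                                     # (The consensus pass further below may override this with
+#                                     # the highest-confidence red light among all detections.)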
+
+# # Add a prominent traffic light status at the top of the frame
+# color = light_info.get('color', 'unknown')
+# confidence = light_info.get('confidence', 0.0)
+
+# if color == 'red':
+# status_color = (0, 0, 255) # Red
+# status_text = f"Traffic Light: RED ({confidence:.2f})"
+
+# # Draw a prominent red banner across the top
+# banner_height = 40
+# cv2.rectangle(annotated_frame, (0, 0), (annotated_frame.shape[1], banner_height), (0, 0, 150), -1)
+
+# # Add text
+# font = cv2.FONT_HERSHEY_DUPLEX
+# font_scale = 0.9
+# font_thickness = 2
+# cv2.putText(annotated_frame, status_text, (10, banner_height-12), font,
+# font_scale, (255, 255, 255), font_thickness)
+# except Exception as e:
+# print(f"[WARN] Could not detect/draw traffic light color: {e}")
+
+# # Print statistics summary
+# print(f"[STATS] Vehicles: {vehicles_with_ids} with IDs, {vehicles_without_ids} without IDs")
+
+# # Handle multiple traffic lights with consensus approach
+# for det in detections:
+# if is_traffic_light(det.get('class_name')):
+# has_traffic_lights = True
+# if 'traffic_light_color' in det:
+# light_info = det['traffic_light_color']
+# traffic_lights.append({'bbox': det['bbox'], 'color': light_info.get('color', 'unknown'), 'confidence': light_info.get('confidence', 0.0)})
+
+# # Determine the dominant traffic light color based on confidence
+# if traffic_lights:
+# # Filter to just red lights and sort by confidence
+# red_lights = [tl for tl in traffic_lights if tl.get('color') == 'red']
+# if red_lights:
+# # Use the highest confidence red light for display
+# highest_conf_red = max(red_lights, key=lambda x: x.get('confidence', 0))
+# # Update the global traffic light status for consistent UI display
+# self.latest_traffic_light = {
+# 'color': 'red',
+# 'confidence': highest_conf_red.get('confidence', 0.0)
+# }
+
+# # Emit all violations as a batch for UI (optional)
+# if violations:
+# if hasattr(self, 'violations_batch_ready'):
+# self.violations_batch_ready.emit(violations)
+# # Emit individual violation signals for each violation
+# for violation in violations:
+# print(f"🚨 Emitting RED LIGHT VIOLATION: Track ID {violation['track_id']}")
+# violation['frame'] = frame
+# violation['violation_line_y'] = violation_line_y
+# self.violation_detected.emit(violation)
+# print(f"[DEBUG] Emitted {len(violations)} violation signals")
+
+# # Add FPS display directly on frame
+# # cv2.putText(annotated_frame, f"FPS: {fps_smoothed:.1f}", (10, 30),
+# # cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+
+# # # --- Always draw detected traffic light color indicator at top ---
+# # color = self.latest_traffic_light.get('color', 'unknown') if isinstance(self.latest_traffic_light, dict) else str(self.latest_traffic_light)
+# # confidence = self.latest_traffic_light.get('confidence', 0.0) if isinstance(self.latest_traffic_light, dict) else 0.0
+# # indicator_size = 30
+# # margin = 10
+# # status_colors = {
+# # "red": (0, 0, 255),
+# # "yellow": (0, 255, 255),
+# # "green": (0, 255, 0),
+# # "unknown": (200, 200, 200)
+# # }
+# # draw_color = status_colors.get(color, (200, 200, 200))
+# # # Draw circle indicator
+# # cv2.circle(
+# # annotated_frame,
+# # (annotated_frame.shape[1] - margin - indicator_size, margin + indicator_size),
+# # indicator_size,
+# # draw_color,
+# # -1
+# # )
+# # # Add color text
+# # cv2.putText(
+# # annotated_frame,
+# # f"{color.upper()} ({confidence:.2f})",
+# # (annotated_frame.shape[1] - margin - indicator_size - 120, margin + indicator_size + 10),
+# # cv2.FONT_HERSHEY_SIMPLEX,
+# # 0.7,
+# # (0, 0, 0),
+# # 2
+# # )
+
+# # Signal for raw data subscribers (now without violations)
+# # Emit with correct number of arguments
+# try:
+# self.raw_frame_ready.emit(frame.copy(), detections, fps_smoothed)
+# print(f"✅ raw_frame_ready signal emitted with {len(detections)} detections, fps={fps_smoothed:.1f}")
+# except Exception as e:
+# print(f"❌ Error emitting raw_frame_ready: {e}")
+# import traceback
+# traceback.print_exc()
+
+# # Emit the NumPy frame signal for direct display - annotated version for visual feedback
+# print(f"🔴 Emitting frame_np_ready signal with annotated_frame shape: {annotated_frame.shape}")
+# try:
+# # Make sure the frame can be safely transmitted over Qt's signal system
+# # Create a contiguous copy of the array
+# frame_copy = np.ascontiguousarray(annotated_frame)
+# print(f"🔍 Debug - Before emission: frame_copy type={type(frame_copy)}, shape={frame_copy.shape}, is_contiguous={frame_copy.flags['C_CONTIGUOUS']}")
+# self.frame_np_ready.emit(frame_copy)
+# print("✅ frame_np_ready signal emitted successfully")
+# except Exception as e:
+# print(f"❌ Error emitting frame: {e}")
+# import traceback
+# traceback.print_exc()
+
+# # Emit QPixmap for video detection tab (frame_ready)
+# try:
+# from PySide6.QtGui import QImage, QPixmap
+# rgb_frame = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
+# h, w, ch = rgb_frame.shape
+# bytes_per_line = ch * w
+# qimg = QImage(rgb_frame.data, w, h, bytes_per_line, QImage.Format_RGB888)
+# pixmap = QPixmap.fromImage(qimg)
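+#                     # QImage above wraps the numpy buffer without copying; QPixmap.fromImage
+#                     # copies the pixel data, so the emitted pixmap stays valid after
+#                     # rgb_frame goes out of scope.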
+# metrics = {
+# 'FPS': fps_smoothed,
+# 'Detection (ms)': detection_time
+# }
+# self.frame_ready.emit(pixmap, detections, metrics)
+# print("✅ frame_ready signal emitted for video detection tab")
+# except Exception as e:
+# print(f"❌ Error emitting frame_ready: {e}")
+# import traceback
+# traceback.print_exc()
+
+# # Emit stats signal for performance monitoring
+# # Count traffic lights for UI (confidence >= 0.5)
+# traffic_light_count = 0
+# for det in detections:
+# if is_traffic_light(det.get('class_name')):
+# tl_conf = 0.0
+# if 'traffic_light_color' in det and isinstance(det['traffic_light_color'], dict):
+# tl_conf = det['traffic_light_color'].get('confidence', 0.0)
+# if tl_conf >= 0.5:
+# traffic_light_count += 1
+# # Count cars for UI (confidence >= 0.5)
+# car_count = 0
+# for det in detections:
+# if det.get('class_name') == 'car' and det.get('confidence', 0.0) >= 0.5:
+# car_count += 1
+# # Get model information from model manager
+# model_info = {}
+# if self.model_manager and hasattr(self.model_manager, 'get_current_model_info'):
+# model_info = self.model_manager.get_current_model_info()
+# print(f"🔧 DEBUG: Model info from manager: {model_info}")
+
+# stats = {
+# 'fps': fps_smoothed,
+# 'detection_fps': fps_smoothed, # Numeric value for analytics
+# 'detection_time': detection_time,
+# 'detection_time_ms': detection_time, # Numeric value for analytics
+# 'traffic_light_color': self.latest_traffic_light,
+# 'tlights': traffic_light_count, # Only confident traffic lights
+# 'cars': car_count, # Only confident cars
+# 'model_path': model_info.get('model_path', ''), # Add model path for UI
+# 'model_name': model_info.get('model_name', 'Unknown') # Add model name for UI
+# }
+# print(f"🔧 DEBUG: Stats with model info: model_name={stats.get('model_name')}, model_path={stats.get('model_path')}")
+
+# # Print detailed stats for debugging
+# tl_color = "unknown"
+# if isinstance(self.latest_traffic_light, dict):
+# tl_color = self.latest_traffic_light.get('color', 'unknown')
+# elif isinstance(self.latest_traffic_light, str):
+# tl_color = self.latest_traffic_light
+
+# print(f"🟢 Stats Updated: FPS={fps_smoothed:.2f}, Inference={detection_time:.2f}ms, Traffic Light={tl_color}")
+
+# # Emit stats signal
+# self.stats_ready.emit(stats)
+
+# # Emit performance stats for performance graphs
+# perf_stats = {
+# 'frame_idx': self.frame_count,
+# 'fps': fps_smoothed,
+# 'inference_time': detection_time,
+# 'device': getattr(self, 'current_device', 'CPU'),
+# 'resolution': getattr(self, 'current_resolution', f'{frame.shape[1]}x{frame.shape[0]}' if frame is not None else '-'),
+# 'model_name': model_info.get('model_name', 'Unknown'), # Add model name for performance graphs
+# 'is_spike': False, # TODO: Add spike logic if needed
+# 'is_res_change': False, # TODO: Add res change logic if needed
+# 'cpu_spike': False, # TODO: Add cpu spike logic if needed
+# }
+# print(f"[PERF] Emitting performance_stats_ready: {perf_stats}")
+# self.performance_stats_ready.emit(perf_stats)
+
+# # --- Update last analysis data for VLM ---
+# self._last_analysis_data = {
+# 'detections': detections,
+# 'tracked_vehicles': tracked_vehicles if 'tracked_vehicles' in locals() else [],
+# 'traffic_light': self.latest_traffic_light,
+# 'crosswalk_bbox': crosswalk_bbox if 'crosswalk_bbox' in locals() else None,
+# 'violation_line_y': violation_line_y if 'violation_line_y' in locals() else None,
+# 'crosswalk_detected': crosswalk_bbox is not None if 'crosswalk_bbox' in locals() else False,
+# 'traffic_light_position': traffic_light_position if has_traffic_lights else None,
+# 'frame_shape': frame.shape if frame is not None else None,
+# 'timestamp': time.time()
+# }
+
+# # --- Ensure analytics update every frame ---
+# # Always add traffic_light_color to each detection dict for analytics
+# for det in detections:
+# if is_traffic_light(det.get('class_name')):
+# if 'traffic_light_color' not in det:
+# det['traffic_light_color'] = self.latest_traffic_light if hasattr(self, 'latest_traffic_light') else {'color': 'unknown', 'confidence': 0.0}
+# if hasattr(self, 'analytics_controller') and self.analytics_controller is not None:
+# try:
+# self.analytics_controller.process_frame_data(frame, detections, stats)
+# print("[DEBUG] Called analytics_controller.process_frame_data for analytics update")
+# except Exception as e:
+# print(f"[ERROR] Could not update analytics: {e}")
+
+# # Control processing rate for file sources
+# if isinstance(self.source, str) and self.source_fps > 0:
+# frame_duration = time.time() - process_start
+# if frame_duration < frame_time:
+# time.sleep(frame_time - frame_duration)
+
+# cap.release()
+# except Exception as e:
+# print(f"Video processing error: {e}")
+# import traceback
+# traceback.print_exc()
+# finally:
+# self._running = False
+# def _process_frame(self):
+# """Process current frame for display with improved error handling"""
+# try:
+# self.mutex.lock()
+# if self.current_frame is None:
+# print("⚠️ No frame available to process")
+# self.mutex.unlock()
+
+# # Check if we're running - if not, this is expected behavior
+# if not self._running:
+# return
+
+# # If we are running but have no frame, create a blank frame with error message
+# h, w = 480, 640 # Default size
+# blank_frame = np.zeros((h, w, 3), dtype=np.uint8)
+# cv2.putText(blank_frame, "No video input", (w//2-100, h//2),
+# cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+
+# # Emit this blank frame
+# try:
+# self.frame_np_ready.emit(blank_frame)
+# except Exception as e:
+# print(f"Error emitting blank frame: {e}")
+
+# return
+
+# # Make a copy of the data we need
+# try:
+# frame = self.current_frame.copy()
+# detections = self.current_detections.copy() if self.current_detections else []
+# metrics = self.performance_metrics.copy()
+# except Exception as e:
+# print(f"Error copying frame data: {e}")
+# self.mutex.unlock()
+# return
+
+# self.mutex.unlock()
+# except Exception as e:
+# print(f"Critical error in _process_frame initialization: {e}")
+# import traceback
+# traceback.print_exc()
+# try:
+# self.mutex.unlock()
+# except:
+# pass
+# return
+
+# try:
+# # --- Simplified frame processing for display ---
+# # The violation logic is now handled in the main _run thread
+# # This method just handles basic display overlays
+
+# annotated_frame = frame.copy()
+
+# # Add performance overlays and debug markers - COMMENTED OUT for clean video display
+# # annotated_frame = draw_performance_overlay(annotated_frame, metrics)
+# # cv2.circle(annotated_frame, (20, 20), 10, (255, 255, 0), -1)
+
+# # Convert BGR to RGB before display (for PyQt/PySide)
+# frame_rgb = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
+# # Display the RGB frame in the UI (replace with your display logic)
+# # Example: self.image_label.setPixmap(QPixmap.fromImage(QImage(frame_rgb.data, w, h, QImage.Format_RGB888)))
+# except Exception as e:
+# print(f"Error in _process_frame: {e}")
+# import traceback
+# traceback.print_exc()
+
+# def _cleanup_old_vehicle_data(self, current_track_ids):
+# """
+# Clean up tracking data for vehicles that are no longer being tracked.
+# This prevents memory leaks and improves performance.
+
+# Args:
+# current_track_ids: Set of currently active track IDs
+# """
+# # Find IDs that are no longer active
+# old_ids = set(self.vehicle_history.keys()) - set(current_track_ids)
+
+# if old_ids:
+# print(f"[CLEANUP] Removing tracking data for {len(old_ids)} old vehicle IDs: {sorted(old_ids)}")
+# for old_id in old_ids:
+# # Remove from history and status tracking
+# if old_id in self.vehicle_history:
+# del self.vehicle_history[old_id]
+# if old_id in self.vehicle_statuses:
+# del self.vehicle_statuses[old_id]
+# print(f"[CLEANUP] Now tracking {len(self.vehicle_history)} active vehicles")
+
+# # --- Removed unused internal violation line detection methods and RedLightViolationSystem usage ---
+from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer, Slot
from PySide6.QtGui import QImage, QPixmap
import cv2
import time
@@ -87,6 +2041,7 @@ class VideoController(QObject):
auto_select_model_device = Signal() # Signal for UI to request auto model/device selection
performance_stats_ready = Signal(dict) # NEW: Signal for performance tab (fps, inference, device, res)
violations_batch_ready = Signal(list) # NEW: Signal to emit a batch of violations
+ pause_state_changed = Signal(bool) # Signal emitted when pause state changes (True=paused, False=playing)
def __init__(self, model_manager=None):
"""
@@ -99,11 +2054,15 @@ class VideoController(QObject):
print("Loaded advanced VideoController from video_controller_new.py") # DEBUG: Confirm correct controller
self._running = False
+ self._paused = False # Add pause state
+ self._last_frame = None # Store last frame for VLM analysis during pause
+ self._last_analysis_data = {} # Store last analysis data for VLM
self.source = None
self.source_type = None
self.source_fps = 0
self.performance_metrics = {}
self.mutex = QMutex()
+ self.pause_condition = QWaitCondition() # Add wait condition for pause
# Performance tracking
self.processing_times = deque(maxlen=100) # Store last 100 processing times
@@ -116,6 +2075,13 @@ class VideoController(QObject):
self.inference_model = None
self.tracker = None
+ # Initialize device tracking
+ if self.model_manager and hasattr(self.model_manager, 'config'):
+ self.current_device = self.model_manager.config.get("detection", {}).get("device", "CPU")
+ else:
+ self.current_device = "CPU"
+ print(f"🔧 Video Controller: Initialized with device: {self.current_device}")
+
self.current_frame = None
self.current_detections = []
@@ -187,6 +2153,18 @@ class VideoController(QObject):
# Add red light violation system
# self.red_light_violation_system = RedLightViolationSystem()
+ # Initialize scene analytics adapter
+ try:
+ from utils.scene_analytics import SceneAnalyticsAdapter
+ self.scene_analytics = SceneAnalyticsAdapter(camera_id="desktop_main")
+ self.scene_analytics.object_detected.connect(self._on_scene_object_detected)
+ self.scene_analytics.scene_analytics_updated.connect(self._on_scene_analytics_updated)
+ self.scene_analytics.roi_event_detected.connect(self._on_roi_event_detected)
+ print("✅ Scene analytics adapter initialized")
+ except Exception as e:
+ self.scene_analytics = None
+ print(f"❌ Could not initialize scene analytics: {e}")
+
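+        # Note on the adapter contract as exercised by this controller (not a full
+        # API description): process_frame(frame, detections) is called once per frame
+        # in the _run loop, and the three signals connected above feed
+        # _on_scene_object_detected, _on_scene_analytics_updated and
+        # _on_roi_event_detected. If the import fails, self.scene_analytics stays
+        # None and every caller checks for that before use.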
def set_source(self, source):
"""
Set video source (file path, camera index, or URL)
@@ -389,6 +2367,13 @@ class VideoController(QObject):
if self._running:
print("DEBUG: Stopping video processing")
self._running = False
+
+ # If paused, wake up the thread so it can exit
+ self.mutex.lock()
+ self._paused = False
+ self.pause_condition.wakeAll()
+ self.mutex.unlock()
+
self.render_timer.stop()
# Properly terminate the thread
if self.thread.isRunning():
@@ -569,6 +2554,18 @@ class VideoController(QObject):
# --- End Violation Rule Functions ---
while self._running and cap.isOpened():
+ # Handle pause state
+ self.mutex.lock()
+ if self._paused:
+ print("[VideoController] Video paused, waiting...")
+ self.pause_condition.wait(self.mutex)
+ print("[VideoController] Video resumed")
+ self.mutex.unlock()
+
+ # Exit if we're no longer running (could have stopped while paused)
+ if not self._running:
+ break
+
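+            # QWaitCondition.wait(mutex) releases the locked mutex while blocked and
+            # re-acquires it before returning, so pause()/resume() on the UI thread
+            # can toggle self._paused and call wakeAll() under the same mutex without
+            # deadlocking this worker loop.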
try:
ret, frame = cap.read()
# Add critical frame debugging
@@ -588,6 +2585,11 @@ class VideoController(QObject):
# Reset the error counter if we successfully got a frame
frame_error_count = 0
+
+ # Store the last frame for VLM analysis during pause
+ self._last_frame = frame.copy()
+ print(f"🟢 Last frame stored for VLM: {frame.shape}")
+
except Exception as e:
print(f"❌ Critical error reading frame: {e}")
frame_error_count += 1
@@ -685,7 +2687,17 @@ class VideoController(QObject):
self.current_frame = frame.copy()
self.current_detections = detections
self.mutex.unlock()
- # Process frame with annotations before sending to UI
+
+ # --- SCENE ANALYTICS PROCESSING ---
+ # Process detections through scene analytics if available
+ if self.scene_analytics:
+ try:
+                    self.scene_analytics.process_frame(frame, detections)
+                    # The adapter emits object/analytics/ROI signals, handled by the _on_scene_* slots
+ except Exception as e:
+ print(f"Error in scene analytics processing: {e}")
+
+ # Process frame with annotations before sending to UI
annotated_frame = frame.copy()
# --- VIOLATION DETECTION LOGIC (Run BEFORE drawing boxes) ---
@@ -1102,7 +3114,9 @@ class VideoController(QObject):
# --- 1. Pedestrian right-of-way violation ---
if crosswalk_poly and latest_light_color == 'green' and pedestrian_tracks:
for track in tracked_vehicles:
- if point_in_polygon(track['center'], crosswalk_poly):
+                    # Only consider moving vehicles; a stopped vehicle is unlikely to be committing this violation
+ if track.get('is_moving', False) and point_in_polygon(track['center'], crosswalk_poly):
+ # Check for pedestrians in the crosswalk
for ped in pedestrian_tracks:
if point_in_polygon(ped['center'], crosswalk_poly):
# Vehicle is blocking crosswalk during green with pedestrian present
@@ -1115,16 +3129,21 @@ class VideoController(QObject):
'timestamp': datetime.now(),
'details': {
'pedestrian_bbox': ped['bbox'],
- 'crosswalk_bbox': crosswalk_bbox
+ 'crosswalk_bbox': crosswalk_bbox,
+ 'is_moving': track.get('is_moving', False),
+ 'traffic_light': latest_light_color
}
})
- print(f"[VIOLATION] Pedestrian right-of-way violation: Vehicle ID={track['id']} blocking crosswalk during green")
+ print(f"[VIOLATION] Pedestrian right-of-way violation: Vehicle ID={track['id']} blocking crosswalk during green light with pedestrian present")
# --- 2. Improper stopping over crosswalk at red ---
if crosswalk_poly and latest_light_color == 'red':
for track in tracked_vehicles:
- if point_in_polygon(track['center'], crosswalk_poly):
- # Calculate overlap ratio
+ # Check if vehicle is not moving (to confirm it's stopped)
+ is_stopped = not track.get('is_moving', True)
+
+ if is_stopped and point_in_polygon(track['center'], crosswalk_poly):
+ # Calculate overlap ratio between vehicle and crosswalk
vx1, vy1, vx2, vy2 = track['bbox']
cx, cy, cw, ch = crosswalk_poly
overlap_x1 = max(vx1, cx)
@@ -1134,7 +3153,8 @@ class VideoController(QObject):
overlap_area = max(0, overlap_x2 - overlap_x1) * max(0, overlap_y2 - overlap_y1)
vehicle_area = (vx2 - vx1) * (vy2 - vy1)
overlap_ratio = overlap_area / max(vehicle_area, 1)
- # Check if vehicle is stopped (low speed)
+
+                        # Verify again that the vehicle is stopped by estimating its speed from recent position history
speed = 0.0
hist = vehicle_position_history.get(track['id'], [])
if len(hist) >= 2:
@@ -1142,7 +3162,9 @@ class VideoController(QObject):
dist = ((c2[0]-c1[0])**2 + (c2[1]-c1[1])**2)**0.5
dt = max(t2-t1, 1e-3)
speed = dist / dt
- if overlap_ratio > 0.3 and speed < 0.5:
+
+ # Vehicle must have significant overlap with crosswalk (>25%) and be stopped
+ if overlap_ratio > 0.25 and speed < 0.8:
violations.append({
'track_id': track['id'],
'id': track['id'],
@@ -1153,37 +3175,64 @@ class VideoController(QObject):
'details': {
'overlap_ratio': overlap_ratio,
'speed': speed,
- 'crosswalk_bbox': crosswalk_bbox
+ 'crosswalk_bbox': crosswalk_bbox,
+ 'traffic_light': latest_light_color,
+ 'is_moving_flag': track.get('is_moving', None)
}
})
- print(f"[VIOLATION] Improper stop on crosswalk: Vehicle ID={track['id']} overlap={overlap_ratio:.2f} speed={speed:.2f}")
+ print(f"[VIOLATION] Improper stop on crosswalk: Vehicle ID={track['id']} stopped on crosswalk during red light (overlap={overlap_ratio:.2f}, speed={speed:.2f})")
# --- 3. Accelerating through yellow/amber light ---
if stopline_poly and latest_light_color == 'yellow' and amber_start_time:
- speed_limit_px_per_sec = 8.0 # Example threshold, tune as needed
+ # Calculate time since light turned yellow
+ current_time = time.time()
+ time_since_yellow = current_time - amber_start_time
+
+ # Speed threshold (in pixels per second) - can be adjusted based on testing
+ speed_limit_px_per_sec = 8.0
+
+ # Check each vehicle approaching the intersection
for track in tracked_vehicles:
- if point_in_polygon(track['center'], stopline_poly):
- # Calculate speed delta
- hist = vehicle_position_history.get(track['id'], [])
- if len(hist) >= 3:
- (c1, t1), (c2, t2), (c3, t3) = hist[-3], hist[-2], hist[-1]
- v1 = ((c2[0]-c1[0])**2 + (c2[1]-c1[1])**2)**0.5 / max(t2-t1, 1e-3)
- v2 = ((c3[0]-c2[0])**2 + (c3[1]-c2[1])**2)**0.5 / max(t3-t2, 1e-3)
- if v2 > v1 * 1.2 and v2 > speed_limit_px_per_sec:
- violations.append({
- 'track_id': track['id'],
- 'id': track['id'],
- 'bbox': [int(track['bbox'][0]), int(track['bbox'][1]), int(track['bbox'][2]), int(track['bbox'][3])],
- 'violation': 'amber_acceleration',
- 'violation_type': 'amber_acceleration',
- 'timestamp': datetime.now(),
- 'details': {
- 'speed_before': v1,
- 'speed_after': v2,
- 'crosswalk_bbox': crosswalk_bbox
- }
- })
- print(f"[VIOLATION] Amber acceleration: Vehicle ID={track['id']} v1={v1:.2f} v2={v2:.2f}")
+                # Check if the vehicle is near the stop line/intersection (inside the stop-line box or within a ~50 px vertical band around it)
+ if point_in_polygon(track['center'], stopline_poly) or (
+ track['center'][1] < stopline_poly[1] + stopline_poly[3] + 50 and
+ track['center'][1] > stopline_poly[1] - 50
+ ):
+ # If the vehicle is moving (confirmed via tracker)
+ if track.get('is_moving', False):
+ # Calculate acceleration by looking at recent speed changes
+ hist = vehicle_position_history.get(track['id'], [])
+ if len(hist) >= 3:
+ # Calculate speeds at different time points
+ (c1, t1), (c2, t2), (c3, t3) = hist[-3], hist[-2], hist[-1]
+
+ # Speed at earlier point
+ v1 = ((c2[0]-c1[0])**2 + (c2[1]-c1[1])**2)**0.5 / max(t2-t1, 1e-3)
+
+ # Speed at later point
+ v2 = ((c3[0]-c2[0])**2 + (c3[1]-c2[1])**2)**0.5 / max(t3-t2, 1e-3)
+
+ # Acceleration violation if:
+ # 1. Speed increases significantly (>20%)
+ # 2. Final speed exceeds threshold
+ # 3. Yellow light is less than 3 seconds old (typical acceleration window)
+ if v2 > v1 * 1.2 and v2 > speed_limit_px_per_sec and time_since_yellow < 3.0:
+ violations.append({
+ 'track_id': track['id'],
+ 'id': track['id'],
+ 'bbox': [int(track['bbox'][0]), int(track['bbox'][1]), int(track['bbox'][2]), int(track['bbox'][3])],
+ 'violation': 'amber_acceleration',
+ 'violation_type': 'amber_acceleration',
+ 'timestamp': datetime.now(),
+ 'details': {
+ 'speed_before': v1,
+ 'speed_after': v2,
+ 'acceleration': (v2-v1)/max(t3-t2, 1e-3),
+ 'time_since_yellow': time_since_yellow,
+ 'traffic_light': latest_light_color
+ }
+ })
+ print(f"[VIOLATION] Amber light acceleration: Vehicle ID={track['id']} accelerated from {v1:.2f} to {v2:.2f} px/sec {time_since_yellow:.1f}s after yellow light")
# Emit progress signal after processing each frame
if hasattr(self, 'progress_ready'):
@@ -1551,6 +3600,19 @@ class VideoController(QObject):
print(f"[PERF] Emitting performance_stats_ready: {perf_stats}")
self.performance_stats_ready.emit(perf_stats)
+ # --- Update last analysis data for VLM ---
+ self._last_analysis_data = {
+ 'detections': detections,
+ 'tracked_vehicles': tracked_vehicles if 'tracked_vehicles' in locals() else [],
+ 'traffic_light': self.latest_traffic_light,
+ 'crosswalk_bbox': crosswalk_bbox if 'crosswalk_bbox' in locals() else None,
+ 'violation_line_y': violation_line_y if 'violation_line_y' in locals() else None,
+ 'crosswalk_detected': crosswalk_bbox is not None if 'crosswalk_bbox' in locals() else False,
+ 'traffic_light_position': traffic_light_position if has_traffic_lights else None,
+ 'frame_shape': frame.shape if frame is not None else None,
+ 'timestamp': time.time()
+ }
+
# --- Ensure analytics update every frame ---
# Always add traffic_light_color to each detection dict for analytics
for det in detections:
@@ -1669,5 +3731,118 @@ class VideoController(QObject):
def play(self):
"""Alias for start(), for UI compatibility."""
self.start()
+
+ def pause(self):
+ """Pause video processing."""
+ print("[VideoController] Pause requested")
+ self.mutex.lock()
+ self._paused = True
+ self.mutex.unlock()
+
+ # Emit the last captured frame for VLM analysis if available
+ if hasattr(self, '_last_frame') and self._last_frame is not None:
+ print("[VideoController] Emitting last frame for VLM analysis during pause")
+ try:
+ # Emit the last frame with empty detections for VLM
+ self.raw_frame_ready.emit(self._last_frame.copy(), [], 0.0)
+ print("✅ Last frame emitted for VLM analysis")
+ except Exception as e:
+ print(f"❌ Error emitting last frame: {e}")
+ else:
+ print("[VideoController] No last frame available for VLM analysis")
+
+ # Emit pause state signal
+ self.pause_state_changed.emit(True)
+ print("[VideoController] Pause state signal emitted: True")
+
+ def resume(self):
+ """Resume video processing from pause."""
+ print("[VideoController] Resume requested")
+ self.mutex.lock()
+ self._paused = False
+ self.pause_condition.wakeAll() # Wake up any waiting threads
+ self.mutex.unlock()
+ # Emit pause state signal
+ self.pause_state_changed.emit(False)
+ print("[VideoController] Pause state signal emitted: False")
+ def get_current_analysis_data(self):
+ """Get current analysis data for VLM insights."""
+ return self._last_analysis_data.copy() if self._last_analysis_data else {}
+ def _on_scene_object_detected(self, obj_data: dict):
+ """Handle scene object detection signal"""
+ try:
+ # Forward scene object detection to analytics
+ print(f"[SCENE] Object detected: {obj_data.get('category', 'unknown')} "
+ f"(confidence: {obj_data.get('confidence', 0):.2f})")
+ except Exception as e:
+ print(f"Error handling scene object detection: {e}")
+
+ def _on_scene_analytics_updated(self, analytics_data: dict):
+ """Handle scene analytics update signal"""
+ try:
+ # Forward scene analytics to performance stats
+ camera_id = analytics_data.get('camera_id', 'unknown')
+ fps = analytics_data.get('fps', 0)
+ processing_time = analytics_data.get('processing_time_ms', 0)
+ object_count = analytics_data.get('object_count', 0)
+
+ # Update performance metrics with scene analytics
+ self.performance_metrics['Scene_FPS'] = fps
+ self.performance_metrics['Scene_Objects'] = object_count
+ self.performance_metrics['Scene_Processing_ms'] = processing_time
+
+ print(f"[SCENE] Analytics updated - FPS: {fps:.1f}, Objects: {object_count}, "
+ f"Processing: {processing_time:.1f}ms")
+
+ except Exception as e:
+ print(f"Error handling scene analytics update: {e}")
+
+ def _on_roi_event_detected(self, event_data: dict):
+ """Handle ROI event detection signal"""
+ try:
+ event_type = event_data.get('type', 'unknown')
+ roi_id = event_data.get('roi_id', 'unknown')
+ object_category = event_data.get('object_category', 'unknown')
+
+ print(f"[SCENE] ROI Event: {event_type} in {roi_id} - {object_category}")
+
+ # Emit as violation if it's a safety-related event
+ if 'safety' in event_type.lower() or 'violation' in event_type.lower():
+ violation_data = {
+ 'type': 'roi_violation',
+ 'roi_id': roi_id,
+ 'object_category': object_category,
+ 'timestamp': event_data.get('timestamp'),
+ 'confidence': event_data.get('confidence', 1.0),
+ 'source': 'scene_analytics'
+ }
+ self.violation_detected.emit(violation_data)
+
+ except Exception as e:
+ print(f"Error handling ROI event: {e}")
+
+ @Slot(str)
+ def on_model_switched(self, device):
+ """Handle device switch from config panel."""
+ try:
+ print(f"🔄 Video Controller: Device switch requested to {device}")
+
+ # Update our device reference
+ self.current_device = device
+ print(f"✅ Video Controller: current_device updated to {device}")
+
+ # If we have a model manager, the device switch should already be done
+ # Just log the current state for verification
+ if self.model_manager and hasattr(self.model_manager, 'detector'):
+ if hasattr(self.model_manager.detector, 'device'):
+ current_device = self.model_manager.detector.device
+ print(f"✅ Video Controller: Model manager detector now using device: {current_device}")
+ else:
+ print(f"✅ Video Controller: Model manager detector updated to {device}")
+
+ print(f"✅ Video Controller: Device switch to {device} completed")
+
+ except Exception as e:
+ print(f"❌ Video Controller: Error during device switch: {e}")
diff --git a/qt_app_pyside1/finale/views/settings_view.py b/qt_app_pyside1/finale/views/settings_view.py
index a49ad4b..336386d 100644
--- a/qt_app_pyside1/finale/views/settings_view.py
+++ b/qt_app_pyside1/finale/views/settings_view.py
@@ -10,7 +10,7 @@ from PySide6.QtWidgets import (
QSlider, QTextEdit, QFileDialog, QMessageBox, QProgressBar,
QFormLayout, QButtonGroup, QRadioButton
)
-from PySide6.QtCore import Qt, Signal, Slot, QTimer, QSettings, QThread, pyqtSignal
+from PySide6.QtCore import Qt, Signal, Slot, QTimer, QSettings, QThread
from PySide6.QtGui import QFont, QPixmap
import os
diff --git a/qt_app_pyside1/requirements.txt b/qt_app_pyside1/requirements.txt
index 97d3759..e8704d6 100644
Binary files a/qt_app_pyside1/requirements.txt and b/qt_app_pyside1/requirements.txt differ
diff --git a/qt_app_pyside1/ui/__pycache__/__init__.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/__init__.cpython-311.pyc
index d739730..a7d1179 100644
Binary files a/qt_app_pyside1/ui/__pycache__/__init__.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/__init__.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/analytics_tab.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/analytics_tab.cpython-311.pyc
index 927afce..b53f5ac 100644
Binary files a/qt_app_pyside1/ui/__pycache__/analytics_tab.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/analytics_tab.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/config_panel.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/config_panel.cpython-311.pyc
index 2bc6d22..6141131 100644
Binary files a/qt_app_pyside1/ui/__pycache__/config_panel.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/config_panel.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/export_tab.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/export_tab.cpython-311.pyc
index a6b9ec5..44ea6cd 100644
Binary files a/qt_app_pyside1/ui/__pycache__/export_tab.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/export_tab.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/global_status_panel.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/global_status_panel.cpython-311.pyc
index aa77d0f..16dfeb3 100644
Binary files a/qt_app_pyside1/ui/__pycache__/global_status_panel.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/global_status_panel.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/live_multi_cam_tab.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/live_multi_cam_tab.cpython-311.pyc
index c1fde58..ce2322a 100644
Binary files a/qt_app_pyside1/ui/__pycache__/live_multi_cam_tab.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/live_multi_cam_tab.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/main_window.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/main_window.cpython-311.pyc
index 3f2d2e0..a8f2088 100644
Binary files a/qt_app_pyside1/ui/__pycache__/main_window.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/main_window.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/performance_graphs.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/performance_graphs.cpython-311.pyc
index 1ab6b0a..9885d00 100644
Binary files a/qt_app_pyside1/ui/__pycache__/performance_graphs.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/performance_graphs.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/video_detection_tab.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/video_detection_tab.cpython-311.pyc
index ee62b2b..1474c85 100644
Binary files a/qt_app_pyside1/ui/__pycache__/video_detection_tab.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/video_detection_tab.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/violations_tab.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/violations_tab.cpython-311.pyc
index b66c6ce..fc101dc 100644
Binary files a/qt_app_pyside1/ui/__pycache__/violations_tab.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/violations_tab.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/analytics_tab.py b/qt_app_pyside1/ui/analytics_tab.py
index 4a2c6b3..e4e8941 100644
--- a/qt_app_pyside1/ui/analytics_tab.py
+++ b/qt_app_pyside1/ui/analytics_tab.py
@@ -1,662 +1,563 @@
from PySide6.QtWidgets import (
QWidget, QVBoxLayout, QHBoxLayout, QLabel,
- QGroupBox, QPushButton, QScrollArea, QSplitter
+ QPushButton, QTabWidget, QTableWidget, QTableWidgetItem, QHeaderView
)
from PySide6.QtCore import Qt, Slot
-from PySide6.QtCharts import QChart, QChartView, QLineSeries, QPieSeries, QBarSeries, QBarSet, QBarCategoryAxis, QScatterSeries, QValueAxis
-from PySide6.QtGui import QPainter, QColor, QPen, QFont, QBrush, QLinearGradient, QGradient
+from PySide6.QtGui import QColor, QFont
-class ChartWidget(QWidget):
- """Base widget for analytics charts"""
- def __init__(self, title):
- super().__init__()
- self.layout = QVBoxLayout(self)
- self.layout.setContentsMargins(0, 0, 0, 0)
-
- # Chart title
- self.title_label = QLabel(title)
- self.title_label.setAlignment(Qt.AlignCenter)
- self.title_label.setStyleSheet("font-weight: bold; font-size: 14px;")
- self.layout.addWidget(self.title_label)
-
- # Create chart
- self.chart = QChart()
- self.chart.setAnimationOptions(QChart.SeriesAnimations)
- self.chart.setBackgroundBrush(QBrush(QColor(240, 240, 240)))
- self.chart.legend().setVisible(True)
- self.chart.legend().setAlignment(Qt.AlignBottom)
-
- # Chart view
- self.chartview = QChartView(self.chart)
- self.chartview.setRenderHint(QPainter.RenderHint.Antialiasing)
- self.layout.addWidget(self.chartview)
-
- self.setMinimumSize(400, 300)
-class TimeSeriesChart(ChartWidget):
- """Time series chart for traffic data"""
- def __init__(self, title="Traffic Over Time"):
- super().__init__(title)
-
- # Create series
- self.vehicle_series = QLineSeries()
- self.vehicle_series.setName("Vehicles")
- self.vehicle_series.setPen(QPen(QColor(0, 162, 232), 2))
-
- self.pedestrian_series = QLineSeries()
- self.pedestrian_series.setName("Pedestrians")
- self.pedestrian_series.setPen(QPen(QColor(255, 140, 0), 2))
-
- self.violation_series = QLineSeries()
- self.violation_series.setName("Violations")
- self.violation_series.setPen(QPen(QColor(232, 0, 0), 2))
-
- self.traffic_light_color_series = QLineSeries()
- self.traffic_light_color_series.setName("Traffic Light Color")
- self.traffic_light_color_series.setPen(QPen(QColor(128, 0, 128), 2, Qt.DashLine))
-
- # Add series to chart
- self.chart.addSeries(self.vehicle_series)
- self.chart.addSeries(self.pedestrian_series)
- self.chart.addSeries(self.violation_series)
- self.chart.addSeries(self.traffic_light_color_series)
-
- # Create and configure axes
- self.chart.createDefaultAxes()
- x_axis = self.chart.axes(Qt.Horizontal)[0]
- x_axis.setTitleText("Time")
- x_axis.setGridLineVisible(True)
- x_axis.setLabelsAngle(45)
-
- y_axis = self.chart.axes(Qt.Vertical)[0]
- y_axis.setTitleText("Count")
- y_axis.setGridLineVisible(True)
-
- def update_data(self, time_series):
- """Update chart with new time series data"""
- try:
- if not time_series or 'timestamps' not in time_series:
- return
-
- # Check if chart and series are still valid
- if not hasattr(self, 'chart') or self.chart is None:
- return
- if not hasattr(self, 'vehicle_series') or self.vehicle_series is None:
- return
-
- timestamps = time_series.get('timestamps', [])
- vehicle_counts = time_series.get('vehicle_counts', [])
- pedestrian_counts = time_series.get('pedestrian_counts', [])
- violation_counts = time_series.get('violation_counts', [])
- traffic_light_colors = time_series.get('traffic_light_colors', [])
-
- # Clear existing series safely
- try:
- self.vehicle_series.clear()
- self.pedestrian_series.clear()
- self.violation_series.clear()
- self.traffic_light_color_series.clear()
- except RuntimeError:
- # C++ object was already deleted, skip update
- return
-
- # Add data points
- for i in range(len(timestamps)):
- try:
- # Add x as index, y as count
- self.vehicle_series.append(i, vehicle_counts[i] if i < len(vehicle_counts) else 0)
- self.pedestrian_series.append(i, pedestrian_counts[i] if i < len(pedestrian_counts) else 0)
- self.violation_series.append(i, violation_counts[i] if i < len(violation_counts) else 0)
-
- # Add traffic light color as mapped int for charting (0=unknown, 1=red, 2=yellow, 3=green)
- if i < len(traffic_light_colors):
- color_map = {'unknown': 0, 'red': 1, 'yellow': 2, 'green': 3}
- color_val = color_map.get(traffic_light_colors[i], 0)
- self.traffic_light_color_series.append(i, color_val)
- except RuntimeError:
- # C++ object was deleted during update
- return
-
- # Update axes safely
- try:
- axes = self.chart.axes(Qt.Horizontal)
- if axes:
- axes[0].setRange(0, max(len(timestamps)-1, 10))
-
- max_count = max(
- max(vehicle_counts) if vehicle_counts else 0,
- max(pedestrian_counts) if pedestrian_counts else 0,
- max(violation_counts) if violation_counts else 0
- )
- axes = self.chart.axes(Qt.Vertical)
- if axes:
- axes[0].setRange(0, max(max_count+1, 5))
- except (RuntimeError, IndexError):
- # Chart axes were deleted or not available
- pass
-
- # Optionally, set y-axis label for traffic light color
- axes = self.chart.axes(Qt.Vertical)
- if axes:
- axes[0].setTitleText("Count / TL Color (0=U,1=R,2=Y,3=G)")
- except Exception as e:
- print(f"[WARNING] Chart update failed: {e}")
-
-class DetectionPieChart(ChartWidget):
- """Pie chart for detected object classes"""
- def __init__(self, title="Detection Classes"):
- super().__init__(title)
-
- self.pie_series = QPieSeries()
- self.chart.addSeries(self.pie_series)
-
- def update_data(self, detection_counts):
- """Update chart with detection counts"""
- try:
- if not detection_counts:
- return
-
- # Check if chart and series are still valid
- if not hasattr(self, 'chart') or self.chart is None:
- return
- if not hasattr(self, 'pie_series') or self.pie_series is None:
- return
-
- # Clear existing slices safely
- try:
- self.pie_series.clear()
- except RuntimeError:
- # C++ object was already deleted, skip update
- return
-
- # Add new slices
- for class_name, count in detection_counts.items():
- # Only add if count > 0
- if count > 0:
- try:
- slice = self.pie_series.append(class_name, count)
-
- # Set colors based on class
- if class_name.lower() == 'car':
- slice.setBrush(QColor(0, 200, 0))
- elif class_name.lower() == 'person':
- slice.setBrush(QColor(255, 165, 0))
- elif class_name.lower() == 'truck':
- slice.setBrush(QColor(0, 100, 200))
- elif class_name.lower() == 'bus':
- slice.setBrush(QColor(200, 0, 100))
-
- # Highlight important slices
- if count > 10:
- slice.setExploded(True)
- slice.setLabelVisible(True)
- except RuntimeError:
- # C++ object was deleted during update
- return
- except Exception as e:
- print(f"[WARNING] Pie chart update failed: {e}")
-
-class ViolationBarChart(ChartWidget):
- """Bar chart for violation types"""
- def __init__(self, title="Violations by Type"):
- super().__init__(title)
-
- # Create series
- self.bar_series = QBarSeries()
- self.chart.addSeries(self.bar_series)
-
- # Create axes
- self.axis_x = QBarCategoryAxis()
- self.chart.addAxis(self.axis_x, Qt.AlignBottom)
- self.bar_series.attachAxis(self.axis_x)
-
- self.chart.createDefaultAxes()
- self.chart.axes(Qt.Vertical)[0].setTitleText("Count")
-
- def update_data(self, violation_counts):
- """Update chart with violation counts"""
- try:
- if not violation_counts:
- return
-
- # Check if chart and series are still valid
- if not hasattr(self, 'chart') or self.chart is None:
- return
- if not hasattr(self, 'bar_series') or self.bar_series is None:
- return
- if not hasattr(self, 'axis_x') or self.axis_x is None:
- return
-
- # Clear existing data safely
- try:
- self.bar_series.clear()
- except RuntimeError:
- # C++ object was already deleted, skip update
- return
-
- # Create bar set
- bar_set = QBarSet("Violations")
-
- # Set colors
- try:
- bar_set.setColor(QColor(232, 0, 0))
- except RuntimeError:
- return
-
- # Add values
- values = []
- categories = []
-
- for violation_type, count in violation_counts.items():
- if count > 0:
- values.append(count)
- # Format violation type for display
- display_name = violation_type.replace('_', ' ').title()
- categories.append(display_name)
-
- if values:
- try:
- bar_set.append(values)
- self.bar_series.append(bar_set)
-
- # Update x-axis categories
- self.axis_x.setCategories(categories)
-
- # Update y-axis range
- y_axes = self.chart.axes(Qt.Vertical)
- if y_axes:
- y_axes[0].setRange(0, max(values) * 1.2)
- except RuntimeError:
- # C++ object was deleted during update
- return
- except Exception as e:
- print(f"[WARNING] Bar chart update failed: {e}")
-
-class LatencyChartWidget(ChartWidget):
- """Custom latency chart with spikes, device/res changes, and live stats legend."""
- def __init__(self, title="Inference Latency Over Time"):
- super().__init__(title)
- self.chart.setBackgroundBrush(QBrush(QColor(24, 28, 32)))
- self.title_label.setStyleSheet("font-weight: bold; font-size: 16px; color: #fff;")
- self.chart.legend().setVisible(False)
- # Main latency line
- self.latency_series = QLineSeries()
- self.latency_series.setName("Latency (ms)")
- self.latency_series.setPen(QPen(QColor(0, 255, 255), 2))
- self.chart.addSeries(self.latency_series)
- # Spikes as red dots
- self.spike_series = QScatterSeries()
- self.spike_series.setName("Spikes")
- self.spike_series.setMarkerSize(8)
- self.spike_series.setColor(QColor(255, 64, 64))
- self.chart.addSeries(self.spike_series)
- # Device/resolution change lines (vertical)
- self.event_lines = []
- # Axes
- self.chart.createDefaultAxes()
- self.x_axis = self.chart.axes(Qt.Horizontal)[0]
- self.x_axis.setTitleText("")
- self.x_axis.setLabelsColor(QColor("#fff"))
- self.x_axis.setGridLineColor(QColor("#444"))
- self.y_axis = self.chart.axes(Qt.Vertical)[0]
- self.y_axis.setTitleText("ms")
- self.y_axis.setLabelsColor(QColor("#fff"))
- self.y_axis.setGridLineColor(QColor("#444"))
- # Stats label
- self.stats_label = QLabel()
- self.stats_label.setStyleSheet("color: #00e6ff; font-size: 13px; font-weight: bold; margin: 2px 0 0 8px;")
- self.layout.addWidget(self.stats_label)
-
- def update_data(self, latency_data):
- """
- latency_data: dict with keys:
- 'latencies': list of float,
- 'spike_indices': list of int,
- 'device_switches': list of int,
- 'resolution_changes': list of int
- """
- if not latency_data or 'latencies' not in latency_data:
- return
- latencies = latency_data.get('latencies', [])
- spikes = set(latency_data.get('spike_indices', []))
- device_switches = set(latency_data.get('device_switches', []))
- res_changes = set(latency_data.get('resolution_changes', []))
- # Clear series
- self.latency_series.clear()
- self.spike_series.clear()
- # Remove old event lines
- for line in self.event_lines:
- self.chart.removeAxis(line)
- self.event_lines = []
- # Plot latency and spikes
- for i, val in enumerate(latencies):
- self.latency_series.append(i, val)
- if i in spikes:
- self.spike_series.append(i, val)
- # Add device/resolution change lines
- for idx in device_switches:
- line = QLineSeries()
- line.setPen(QPen(QColor(33, 150, 243), 3)) # Blue
- line.append(idx, min(latencies) if latencies else 0)
- line.append(idx, max(latencies) if latencies else 1)
- self.chart.addSeries(line)
- line.attachAxis(self.x_axis)
- line.attachAxis(self.y_axis)
- self.event_lines.append(line)
- for idx in res_changes:
- line = QLineSeries()
- line.setPen(QPen(QColor(255, 167, 38), 3)) # Orange
- line.append(idx, min(latencies) if latencies else 0)
- line.append(idx, max(latencies) if latencies else 1)
- self.chart.addSeries(line)
- line.attachAxis(self.x_axis)
- line.attachAxis(self.y_axis)
- self.event_lines.append(line)
- # Update axes
- self.x_axis.setRange(0, max(len(latencies)-1, 10))
- self.y_axis.setRange(0, max(max(latencies) if latencies else 1, 10))
- # Stats
- if latencies:
- avg = sum(latencies)/len(latencies)
- mx = max(latencies)
- self.stats_label.setText(f"Avg: {avg:.1f}ms | Max: {mx:.1f}ms | Spikes: {len(spikes)}")
- else:
- self.stats_label.setText("")
-
-class FPSChartWidget(ChartWidget):
- """FPS & Resolution Impact chart with device/resolution change lines and live stats."""
- def __init__(self, title="FPS & Resolution Impact"):
- super().__init__(title)
- self.chart.setBackgroundBrush(QBrush(QColor(24, 28, 32)))
- self.title_label.setStyleSheet("font-weight: bold; font-size: 16px; color: #fff;")
- self.chart.legend().setVisible(False)
- self.fps_series = QLineSeries()
- self.fps_series.setName("FPS")
- self.fps_series.setPen(QPen(QColor(0, 255, 255), 2))
- self.chart.addSeries(self.fps_series)
- self.event_lines = []
- self.chart.createDefaultAxes()
- self.x_axis = self.chart.axes(Qt.Horizontal)[0]
- self.x_axis.setLabelsColor(QColor("#fff"))
- self.x_axis.setGridLineColor(QColor("#444"))
- self.y_axis = self.chart.axes(Qt.Vertical)[0]
- self.y_axis.setTitleText("FPS")
- self.y_axis.setLabelsColor(QColor("#fff"))
- self.y_axis.setGridLineColor(QColor("#444"))
- self.stats_label = QLabel()
- self.stats_label.setStyleSheet("color: #00ff82; font-size: 13px; font-weight: bold; margin: 2px 0 0 8px;")
- self.layout.addWidget(self.stats_label)
- def update_data(self, fps_data):
- if not fps_data or 'fps' not in fps_data:
- return
- fps = fps_data.get('fps', [])
- device_switches = set(fps_data.get('device_switches', []))
- res_changes = set(fps_data.get('resolution_changes', []))
- device_labels = fps_data.get('device_labels', {})
- res_labels = fps_data.get('resolution_labels', {})
- self.fps_series.clear()
- for line in self.event_lines:
- self.chart.removeAxis(line)
- self.event_lines = []
- for i, val in enumerate(fps):
- self.fps_series.append(i, val)
- for idx in device_switches:
- line = QLineSeries()
- line.setPen(QPen(QColor(33, 150, 243), 3))
- line.append(idx, min(fps) if fps else 0)
- line.append(idx, max(fps) if fps else 1)
- self.chart.addSeries(line)
- line.attachAxis(self.x_axis)
- line.attachAxis(self.y_axis)
- self.event_lines.append(line)
- for idx in res_changes:
- line = QLineSeries()
- line.setPen(QPen(QColor(255, 167, 38), 3))
- line.append(idx, min(fps) if fps else 0)
- line.append(idx, max(fps) if fps else 1)
- self.chart.addSeries(line)
- line.attachAxis(self.x_axis)
- line.attachAxis(self.y_axis)
- self.event_lines.append(line)
- self.x_axis.setRange(0, max(len(fps)-1, 10))
- self.y_axis.setRange(0, max(max(fps) if fps else 1, 10))
- # Live stats (current FPS, resolution, device)
- cur_fps = fps[-1] if fps else 0
- cur_res = res_labels.get(len(fps)-1, "-")
- cur_dev = device_labels.get(len(fps)-1, "-")
- self.stats_label.setText(f"Current FPS: {cur_fps:.1f} | Resolution: {cur_res} | Device: {cur_dev}")
-
-class DeviceSwitchChartWidget(ChartWidget):
- """Device Switching & Resolution Changes chart with colored vertical lines and legend."""
- def __init__(self, title="Device Switching & Resolution Changes"):
- super().__init__(title)
- self.chart.setBackgroundBrush(QBrush(QColor(24, 28, 32)))
- self.title_label.setStyleSheet("font-weight: bold; font-size: 16px; color: #fff;")
- self.chart.legend().setVisible(False)
- self.event_lines = []
- self.chart.createDefaultAxes()
- self.x_axis = self.chart.axes(Qt.Horizontal)[0]
- self.x_axis.setLabelsColor(QColor("#fff"))
- self.x_axis.setGridLineColor(QColor("#444"))
- self.y_axis = self.chart.axes(Qt.Vertical)[0]
- self.y_axis.setTitleText("-")
- self.y_axis.setLabelsColor(QColor("#fff"))
- self.y_axis.setGridLineColor(QColor("#444"))
- self.legend_label = QLabel()
- self.legend_label.setStyleSheet("color: #ffb300; font-size: 13px; font-weight: bold; margin: 2px 0 0 8px;")
- self.layout.addWidget(self.legend_label)
- def update_data(self, event_data):
- if not event_data:
- return
- cpu_spikes = set(event_data.get('cpu_spikes', []))
- gpu_spikes = set(event_data.get('gpu_spikes', []))
- switches = set(event_data.get('switches', []))
- res_changes = set(event_data.get('res_changes', []))
- n = event_data.get('n', 100)
- for line in self.event_lines:
- self.chart.removeAxis(line)
- self.event_lines = []
- for idx in cpu_spikes:
- line = QLineSeries()
- line.setPen(QPen(QColor(255, 64, 64), 2))
- line.append(idx, 0)
- line.append(idx, 1)
- self.chart.addSeries(line)
- line.attachAxis(self.x_axis)
- line.attachAxis(self.y_axis)
- self.event_lines.append(line)
- for idx in gpu_spikes:
- line = QLineSeries()
- line.setPen(QPen(QColor(255, 87, 34), 2))
- line.append(idx, 0)
- line.append(idx, 1)
- self.chart.addSeries(line)
- line.attachAxis(self.x_axis)
- line.attachAxis(self.y_axis)
- self.event_lines.append(line)
- for idx in switches:
- line = QLineSeries()
- line.setPen(QPen(QColor(33, 150, 243), 2))
- line.append(idx, 0)
- line.append(idx, 1)
- self.chart.addSeries(line)
- line.attachAxis(self.x_axis)
- line.attachAxis(self.y_axis)
- self.event_lines.append(line)
- for idx in res_changes:
- line = QLineSeries()
- line.setPen(QPen(QColor(255, 167, 38), 2))
- line.append(idx, 0)
- line.append(idx, 1)
- self.chart.addSeries(line)
- line.attachAxis(self.x_axis)
- line.attachAxis(self.y_axis)
- self.event_lines.append(line)
- self.x_axis.setRange(0, n)
- self.y_axis.setRange(0, 1)
- self.legend_label.setText("CPU Spikes: {} | GPU Spikes: {} | Switches: {} | Res Changes: {}".format(len(cpu_spikes), len(gpu_spikes), len(switches), len(res_changes)))
-
-class AnalyticsTab(QWidget):
- """Analytics tab with charts and statistics"""
+class CleanAnalyticsWidget(QWidget):
+ """Clean and minimal analytics widget with tabbed interface"""
def __init__(self):
super().__init__()
- self.initUI()
+ # Data storage for real-time updates
+ self.latest_traffic_lights = []
+ self.latest_violations = []
+ self.latest_vehicles = []
+ self.latest_frame_data = {}
+ self.init_ui()
- def initUI(self):
- """Initialize UI components"""
- main_layout = QVBoxLayout(self)
+ def init_ui(self):
+ """Initialize the clean UI with tabs"""
+ layout = QVBoxLayout(self)
+ layout.setContentsMargins(10, 10, 10, 10)
+ layout.setSpacing(10)
- # Add notice that violations are disabled
- notice_label = QLabel("⚠️ Violation detection is currently disabled. Only object detection statistics will be shown.")
- notice_label.setStyleSheet("font-size: 14px; color: #FFA500; font-weight: bold; padding: 10px;")
- notice_label.setAlignment(Qt.AlignCenter)
- main_layout.addWidget(notice_label)
+ # Set dark background for the main widget
+ self.setStyleSheet("""
+ QWidget {
+ background-color: #2C3E50;
+ color: #FFFFFF;
+ }
+ """)
- # Charts section
- charts_splitter = QSplitter(Qt.Horizontal)
+ # Title
+ title_label = QLabel("🚦 Traffic Intersection Monitor")
+ title_label.setStyleSheet("""
+ QLabel {
+ font-size: 20px;
+ font-weight: bold;
+ color: #FFFFFF;
+ font-family: 'Roboto', Arial, sans-serif;
+ padding: 15px;
+ background: qlineargradient(x1:0, y1:0, x2:1, y2:0,
+ stop:0 #2C3E50, stop:1 #34495E);
+ border-radius: 8px;
+ border: 1px solid #34495E;
+ }
+ """)
+ title_label.setAlignment(Qt.AlignCenter)
+ layout.addWidget(title_label)
- # Latency chart (top, full width)
- self.latency_chart = LatencyChartWidget("Inference Latency Over Time")
- main_layout.addWidget(self.latency_chart)
+ # Create tab widget
+ self.tab_widget = QTabWidget()
+ self.tab_widget.setStyleSheet("""
+ QTabWidget::pane {
+ border: 1px solid #34495E;
+ border-radius: 8px;
+ background-color: #2C3E50;
+ }
+ QTabBar::tab {
+ background: #34495E;
+ color: #FFFFFF;
+ padding: 12px 20px;
+ margin-right: 2px;
+ border-top-left-radius: 8px;
+ border-top-right-radius: 8px;
+ font-family: 'Roboto', Arial, sans-serif;
+ font-weight: 500;
+ min-width: 120px;
+ }
+ QTabBar::tab:selected {
+ background: #3498DB;
+ color: white;
+ }
+ QTabBar::tab:hover:!selected {
+ background: #2C3E50;
+ }
+ """)
- # Left side - Time series chart
- self.time_series_chart = TimeSeriesChart("Traffic Over Time")
- charts_splitter.addWidget(self.time_series_chart)
+ # Create tabs
+ self.create_traffic_light_tab()
+ self.create_violation_tab()
+ self.create_vehicle_tab()
- # Right side - Detection and violation charts
- right_charts = QWidget()
- right_layout = QVBoxLayout(right_charts)
+ layout.addWidget(self.tab_widget)
- self.detection_chart = DetectionPieChart("Detection Classes")
- self.violation_chart = ViolationBarChart("Violations by Type")
+ # Refresh button
+ refresh_btn = QPushButton("🔄 Refresh Data")
+ refresh_btn.setStyleSheet("""
+ QPushButton {
+ background: qlineargradient(x1:0, y1:0, x2:0, y2:1,
+ stop:0 #3498DB, stop:1 #2980B9);
+ color: white;
+ border: none;
+ padding: 12px 24px;
+ border-radius: 6px;
+ font-weight: bold;
+ font-family: 'Roboto', Arial, sans-serif;
+ font-size: 14px;
+ }
+ QPushButton:hover {
+ background: qlineargradient(x1:0, y1:0, x2:0, y2:1,
+ stop:0 #5DADE2, stop:1 #3498DB);
+ }
+ QPushButton:pressed {
+ background: qlineargradient(x1:0, y1:0, x2:0, y2:1,
+ stop:0 #2980B9, stop:1 #21618C);
+ }
+ """)
+ refresh_btn.clicked.connect(self.refresh_all_data)
- right_layout.addWidget(self.detection_chart)
- right_layout.addWidget(self.violation_chart)
+ # Center the button
+ button_layout = QHBoxLayout()
+ button_layout.addStretch()
+ button_layout.addWidget(refresh_btn)
+ button_layout.addStretch()
+ layout.addLayout(button_layout)
- charts_splitter.addWidget(right_charts)
- charts_splitter.setSizes([500, 500]) # Equal initial sizes
+ def create_traffic_light_tab(self):
+ """Create traffic light status tab"""
+ tab = QWidget()
+ layout = QVBoxLayout(tab)
+ layout.setContentsMargins(15, 15, 15, 15)
- main_layout.addWidget(charts_splitter)
+ # Table
+ self.traffic_table = QTableWidget(0, 5)
+ self.traffic_table.setHorizontalHeaderLabels([
+ "Detection", "Red Ratio", "Yellow Ratio", "Green Ratio", "Status"
+ ])
- # Key metrics section
- metrics_box = QGroupBox("Key Metrics")
- metrics_layout = QHBoxLayout(metrics_box)
+ # Apply clean table styling
+ self.apply_table_style(self.traffic_table)
- # Vehicle metrics
- vehicle_metrics = QGroupBox("Traffic")
- vehicle_layout = QVBoxLayout(vehicle_metrics)
- self.total_vehicles_label = QLabel("Total Vehicles: 0")
- self.total_pedestrians_label = QLabel("Total Pedestrians: 0")
- vehicle_layout.addWidget(self.total_vehicles_label)
- vehicle_layout.addWidget(self.total_pedestrians_label)
- metrics_layout.addWidget(vehicle_metrics)
+ # Start with empty table - no sample data
+ layout.addWidget(self.traffic_table)
- # Violation metrics
- violation_metrics = QGroupBox("Violations")
- violation_layout = QVBoxLayout(violation_metrics)
- self.total_violations_label = QLabel("Total Violations: 0")
- self.peak_violation_label = QLabel("Peak Violation Hour: --")
- violation_layout.addWidget(self.total_violations_label)
- violation_layout.addWidget(self.peak_violation_label)
- metrics_layout.addWidget(violation_metrics)
+ self.tab_widget.addTab(tab, "🚦 Traffic Lights")
- # Performance metrics
- performance_metrics = QGroupBox("Performance")
- performance_layout = QVBoxLayout(performance_metrics)
- self.avg_fps_label = QLabel("Avg FPS: 0")
- self.avg_processing_label = QLabel("Avg Processing Time: 0 ms")
- performance_layout.addWidget(self.avg_fps_label)
- performance_layout.addWidget(self.avg_processing_label)
- metrics_layout.addWidget(performance_metrics)
+ def create_violation_tab(self):
+ """Create violation summary tab"""
+ tab = QWidget()
+ layout = QVBoxLayout(tab)
+ layout.setContentsMargins(15, 15, 15, 15)
- main_layout.addWidget(metrics_box)
+ # Table
+ self.violation_table = QTableWidget(0, 3)
+ self.violation_table.setHorizontalHeaderLabels([
+ "Track ID", "Violation Type", "Status"
+ ])
- # Controls
- controls = QHBoxLayout()
- self.reset_btn = QPushButton("Reset Statistics")
- controls.addWidget(self.reset_btn)
- controls.addStretch(1) # Push button to left
+ # Apply clean table styling
+ self.apply_table_style(self.violation_table)
- main_layout.addLayout(controls)
+ # Start with empty table - no sample data
+ layout.addWidget(self.violation_table)
+
+ self.tab_widget.addTab(tab, "🚨 Violations")
+
+ def create_vehicle_tab(self):
+ """Create vehicle tracking status tab"""
+ tab = QWidget()
+ layout = QVBoxLayout(tab)
+ layout.setContentsMargins(15, 15, 15, 15)
+
+ # Table
+ self.vehicle_table = QTableWidget(0, 6)
+ self.vehicle_table.setHorizontalHeaderLabels([
+ "Track ID", "Position (x,y)", "Center Y", "Moving", "Violating", "Status"
+ ])
+
+ # Apply clean table styling
+ self.apply_table_style(self.vehicle_table)
+
+ # Start with empty table - no sample data
+ layout.addWidget(self.vehicle_table)
+
+ self.tab_widget.addTab(tab, "🚗 Vehicles")
+
+ def apply_table_style(self, table):
+ """Apply consistent styling to tables"""
+ # Set font
+ font = QFont("Roboto", 10)
+ table.setFont(font)
+
+ # Header styling
+ table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
+ table.horizontalHeader().setStyleSheet("""
+ QHeaderView::section {
+ background: qlineargradient(x1:0, y1:0, x2:0, y2:1,
+ stop:0 #1A252F, stop:1 #2C3E50);
+ color: #FFFFFF;
+ padding: 10px;
+ border: 1px solid #2C3E50;
+ font-weight: bold;
+ font-family: 'Roboto', Arial, sans-serif;
+ }
+ """)
+
+ # Table styling
+ table.setStyleSheet("""
+ QTableWidget {
+ gridline-color: #34495E;
+ background-color: #2C3E50;
+ alternate-background-color: #34495E;
+ selection-background-color: #3498DB;
+ border: 1px solid #34495E;
+ border-radius: 6px;
+ color: #FFFFFF;
+ }
+ QTableWidget::item {
+ padding: 8px;
+ border-bottom: 1px solid #34495E;
+ color: #FFFFFF;
+ }
+ QTableWidget::item:selected {
+ background-color: #3498DB;
+ color: #FFFFFF;
+ }
+ """)
+
+ # Enable alternating row colors
+ table.setAlternatingRowColors(True)
+
+ # Set selection behavior
+ table.setSelectionBehavior(QTableWidget.SelectRows)
+
+ def populate_table(self, table, data, table_type):
+ """Populate table with data and apply color coding for dark theme"""
+ table.setRowCount(len(data))
+
+ for i, row in enumerate(data):
+ for j, item in enumerate(row):
+ cell = QTableWidgetItem(str(item))
+ cell.setForeground(QColor(255, 255, 255)) # White text
+
+ # Apply color coding based on content for dark theme
+ if table_type == "traffic_light":
+ if "🔴" in str(item):
+ cell.setBackground(QColor(139, 69, 19)) # Dark red/brown
+ elif "🟡" in str(item):
+ cell.setBackground(QColor(184, 134, 11)) # Dark yellow
+ elif "🟢" in str(item):
+ cell.setBackground(QColor(34, 139, 34)) # Dark green
+
+ elif table_type == "violation":
+ if "Active" in str(item) or "🚨" in str(item):
+ cell.setBackground(QColor(139, 69, 19)) # Dark red/brown
+ cell.setForeground(QColor(255, 255, 255)) # White text
+ elif "Detected" in str(item):
+ cell.setBackground(QColor(205, 133, 63)) # Dark orange
+ cell.setForeground(QColor(255, 255, 255)) # White text
+
+ elif table_type == "vehicle":
+ if "🔴" in str(item) or ("True" in str(item) and j == 4): # Violating column
+ cell.setBackground(QColor(139, 69, 19)) # Dark red/brown
+ cell.setForeground(QColor(255, 255, 255)) # White text
+ elif "🟢" in str(item):
+ cell.setBackground(QColor(34, 139, 34)) # Dark green
+ cell.setForeground(QColor(255, 255, 255)) # White text
+
+ table.setItem(i, j, cell)
+
+ def refresh_all_data(self):
+ """Refresh all tables with latest data"""
+ print("🔄 Refreshing analytics data...")
+ self.update_traffic_lights_table()
+ self.update_violations_table()
+ self.update_vehicles_table()
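+        # The update_* helpers are expected to be defined later in this class; as an
+        # illustration only (assumed shape, not the real helper), update_vehicles_table
+        # could map self.latest_vehicles into populate_table():
+        #   rows = [[v['track_id'], f"{v['center'][0]:.0f},{v['center'][1]:.0f}",
+        #            f"{v['center'][1]:.0f}", v['moving'],
+        #            "🔴" if v['violating'] else "🟢",
+        #            "Violating" if v['violating'] else "OK"]
+        #           for v in self.latest_vehicles]
+        #   self.populate_table(self.vehicle_table, rows, "vehicle")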
@Slot(dict)
- def update_analytics(self, analytics):
- """
- Update analytics display with new data.
-
- Args:
- analytics: Dictionary of analytics data
- """
+ def update_detection_data(self, detection_data):
+ """Update analytics with detection data from video tab"""
try:
- if not analytics:
- return
-
- # Update latency chart
- try:
- if hasattr(self, 'latency_chart') and self.latency_chart is not None:
- self.latency_chart.update_data(analytics.get('latency', {}))
- except Exception as e:
- print(f"[WARNING] Latency chart update failed: {e}")
+ print(f"[ANALYTICS UPDATE] Received detection data with keys: {list(detection_data.keys())}")
+ self.latest_frame_data = detection_data
- # Update charts with error handling
- try:
- if hasattr(self, 'time_series_chart') and self.time_series_chart is not None:
- self.time_series_chart.update_data(analytics.get('time_series', {}))
- except Exception as e:
- print(f"[WARNING] Time series chart update failed: {e}")
-
- try:
- if hasattr(self, 'detection_chart') and self.detection_chart is not None:
- self.detection_chart.update_data(analytics.get('detection_counts', {}))
- except Exception as e:
- print(f"[WARNING] Detection chart update failed: {e}")
-
- try:
- if hasattr(self, 'violation_chart') and self.violation_chart is not None:
- self.violation_chart.update_data(analytics.get('violation_counts', {}))
- except Exception as e:
- print(f"[WARNING] Violation chart update failed: {e}")
+ # Extract traffic lights
+ detections = detection_data.get('detections', [])
+ traffic_lights = []
+ vehicles = []
- # Update metrics
- try:
- metrics = analytics.get('metrics', {})
-
- if hasattr(self, 'total_vehicles_label'):
- self.total_vehicles_label.setText(f"Total Vehicles: {metrics.get('total_vehicles', 0)}")
- if hasattr(self, 'total_pedestrians_label'):
- self.total_pedestrians_label.setText(f"Total Pedestrians: {metrics.get('total_pedestrians', 0)}")
-
- if hasattr(self, 'total_violations_label'):
- self.total_violations_label.setText(f"Total Violations: {metrics.get('total_violations', 0)}")
-
- peak_hour = metrics.get('peak_violation_hour')
- if peak_hour:
- peak_text = f"Peak Violation Hour: {peak_hour.get('time', '--')} ({peak_hour.get('violations', 0)})"
+ for detection in detections:
+ if hasattr(detection, 'label'):
+ label = detection.label
+ elif isinstance(detection, dict):
+ label = detection.get('label', detection.get('class', detection.get('class_name', '')))
else:
- peak_text = "Peak Violation Hour: --"
- if hasattr(self, 'peak_violation_label'):
- self.peak_violation_label.setText(peak_text)
+ label = str(detection)
- if hasattr(self, 'avg_fps_label'):
- self.avg_fps_label.setText(f"Avg FPS: {metrics.get('avg_fps', 0):.1f}")
- if hasattr(self, 'avg_processing_label'):
- self.avg_processing_label.setText(
- f"Avg Processing Time: {metrics.get('avg_processing_time', 0):.1f} ms"
- )
- except Exception as e:
- print(f"[WARNING] Metrics update failed: {e}")
+ if 'traffic light' in str(label).lower():
+ traffic_lights.append(detection)
+ elif any(vehicle_type in str(label).lower() for vehicle_type in ['car', 'truck', 'bus', 'motorcycle']):
+ vehicles.append(detection)
+
+ self.latest_traffic_lights = traffic_lights
+
+ # Extract vehicle tracking data - Handle the EXACT structure from video controller
+ tracked_vehicles = detection_data.get('tracked_vehicles', [])
+ print(f"[ANALYTICS UPDATE] Found {len(tracked_vehicles)} tracked vehicles")
+
+ # Process tracked vehicles with the correct structure
+ processed_vehicles = []
+ for vehicle in tracked_vehicles:
+ print(f"[ANALYTICS UPDATE] Raw vehicle data: {vehicle}")
- # Update traffic light label with latest color
- try:
- tl_series = analytics.get('traffic_light_color_series', [])
- if tl_series:
- latest = tl_series[-1][1]
- self.traffic_light_label.setText(f"Traffic Light: {latest.title()}")
+ # Handle the actual structure: {id, bbox, center_y, is_moving, is_violation}
+ if isinstance(vehicle, dict):
+ track_id = vehicle.get('id', 'Unknown')
+ bbox = vehicle.get('bbox', [0, 0, 0, 0])
+ center_y = vehicle.get('center_y', 0)
+ moving = vehicle.get('is_moving', False)
+ violating = vehicle.get('is_violation', False)
+
+ # Calculate center_x from bbox
+ if len(bbox) >= 4:
+ center_x = (bbox[0] + bbox[2]) / 2
+ else:
+ center_x = 0
+
else:
- self.traffic_light_label.setText("Traffic Light: Unknown")
- except Exception as e:
- print(f"[WARNING] Traffic light label update failed: {e}")
+ # Fallback for other object types
+ track_id = getattr(vehicle, 'id', getattr(vehicle, 'track_id', 'Unknown'))
+ bbox = getattr(vehicle, 'bbox', [0, 0, 0, 0])
+ center_y = getattr(vehicle, 'center_y', 0)
+ moving = getattr(vehicle, 'is_moving', getattr(vehicle, 'moving', False))
+ violating = getattr(vehicle, 'is_violation', getattr(vehicle, 'violating', False))
+
+ if len(bbox) >= 4:
+ center_x = (bbox[0] + bbox[2]) / 2
+ else:
+ center_x = 0
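+                # Normalized record consumed by update_vehicles_table()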
+ processed_vehicles.append({
+ 'track_id': track_id,
+ 'center': (center_x, center_y),
+ 'moving': moving,
+ 'violating': violating
+ })
+
+ print(f"[ANALYTICS UPDATE] Processed vehicle ID={track_id}, center=({center_x:.1f}, {center_y:.1f}), moving={moving}, violating={violating}")
+
+ self.latest_vehicles = processed_vehicles
+ print(f"[ANALYTICS UPDATE] Stored {len(self.latest_vehicles)} processed vehicles")
+
+ # Update tables with new data
+ self.update_traffic_lights_table()
+ self.update_vehicles_table()
+
except Exception as e:
- print(f"[ERROR] Analytics update failed: {e}")
+ print(f"Error updating detection data: {e}")
+ import traceback
+ traceback.print_exc()
+
+ @Slot(dict)
+ def update_violation_data(self, violation_data):
+ """Update violations data"""
+ try:
+ # Store violation data
+ track_id = violation_data.get('track_id')
+ violation_type = violation_data.get('type', 'Unknown')
+
+ # Add to violations list if not already present
+ existing = [v for v in self.latest_violations if v.get('track_id') == track_id]
+ if not existing:
+ self.latest_violations.append({
+ 'track_id': track_id,
+ 'type': violation_type,
+ 'status': 'Active',
+ 'timestamp': violation_data.get('timestamp', '')
+ })
+
+ self.update_violations_table()
+
+ except Exception as e:
+ print(f"Error updating violation data: {e}")
+
+ def update_traffic_lights_table(self):
+ """Update traffic lights table with latest data"""
+ try:
+ data = []
+
+ # Check if we have traffic light data from frame analysis
+ latest_traffic_light = self.latest_frame_data.get('traffic_light', {})
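+            # Assumed shape from the video controller: a small dict such as
+            # {'color': 'red', 'confidence': 0.87}; only these two keys are read below.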
+ if latest_traffic_light:
+ # Extract traffic light info
+ color = latest_traffic_light.get('color', 'unknown')
+ confidence = latest_traffic_light.get('confidence', 0.0)
+
+ # Create traffic light entries based on the detected signal
+ if color == 'red':
+ status = "🔴 Red"
+ red_ratio = confidence
+ yellow_ratio = 0.0
+ green_ratio = 0.0
+ elif color == 'yellow':
+ status = "🟡 Yellow"
+ red_ratio = 0.0
+ yellow_ratio = confidence
+ green_ratio = 0.0
+ elif color == 'green':
+ status = "🟢 Green"
+ red_ratio = 0.0
+ yellow_ratio = 0.0
+ green_ratio = confidence
+ else:
+ status = "❓ Unknown"
+ red_ratio = 0.0
+ yellow_ratio = 0.0
+ green_ratio = 0.0
+
+ data.append([
+ "Main Traffic Light",
+ f"{red_ratio:.3f}",
+ f"{yellow_ratio:.3f}",
+ f"{green_ratio:.3f}",
+ status
+ ])
+
+ # Also check for individual traffic light detections
+ for i, tl in enumerate(self.latest_traffic_lights):
+                # Skip non-dict entries; only dict detections carry the color-ratio debug info read below
+                if not isinstance(tl, dict):
+                    continue
+ # Extract color ratios from debug data if available
+ color_info = tl.get('color_info', {})
+ red_ratio = color_info.get('red', 0.0)
+ yellow_ratio = color_info.get('yellow', 0.0)
+ green_ratio = color_info.get('green', 0.0)
+
+ # Determine status
+ if red_ratio > 0.3:
+ status = "🔴 Red"
+ elif yellow_ratio > 0.3:
+ status = "🟡 Yellow"
+ elif green_ratio > 0.3:
+ status = "🟢 Green"
+ else:
+ status = "❓ Unknown"
+
+ data.append([
+ f"Traffic Light {i+1}",
+ f"{red_ratio:.3f}",
+ f"{yellow_ratio:.3f}",
+ f"{green_ratio:.3f}",
+ status
+ ])
+
+            # If nothing was detected, the table is simply left empty (no sample rows)
+
+ self.populate_table(self.traffic_table, data, "traffic_light")
+
+ except Exception as e:
+ print(f"Error updating traffic lights table: {e}")
+
+ def update_violations_table(self):
+ """Update violations table with latest data"""
+ try:
+ data = []
+ for violation in self.latest_violations:
+ data.append([
+ str(violation.get('track_id', 'Unknown')),
+ f"🚨 {violation.get('type', 'Unknown')}",
+ violation.get('status', 'Active')
+ ])
+
+            # If there are no violations, the table is simply left empty
+
+ self.populate_table(self.violation_table, data, "violation")
+
+ except Exception as e:
+ print(f"Error updating violations table: {e}")
+
+ def update_vehicles_table(self):
+ """Update vehicles table with latest data"""
+ try:
+ print(f"[ANALYTICS UPDATE] Updating vehicles table with {len(self.latest_vehicles)} vehicles")
+ data = []
+
+ for vehicle in self.latest_vehicles:
+ track_id = vehicle.get('track_id', 'Unknown')
+ center = vehicle.get('center', (0, 0))
+ position = f"({center[0]:.1f}, {center[1]:.1f})"
+ center_y = center[1] if len(center) > 1 else 0
+ moving = vehicle.get('moving', False)
+ violating = vehicle.get('violating', False)
+
+ if violating:
+ status = "🔴 Violating"
+ elif moving:
+ status = "🟡 Moving"
+ else:
+ status = "🟢 Stopped"
+
+ data.append([
+ str(track_id),
+ position,
+ f"{center_y:.1f}",
+ str(moving),
+ str(violating),
+ status
+ ])
+
+ print(f"[ANALYTICS UPDATE] Added vehicle row: ID={track_id}, pos={position}, moving={moving}, violating={violating}, status={status}")
+
+ print(f"[ANALYTICS UPDATE] Total vehicle rows to display: {len(data)}")
+
+            # If there are no vehicles, the table is simply left empty
+
+ self.populate_table(self.vehicle_table, data, "vehicle")
+
+ except Exception as e:
+ print(f"Error updating vehicles table: {e}")
+ import traceback
+ traceback.print_exc()
+
+
+class AnalyticsTab(QWidget):
+ """Main analytics tab with clean design"""
+
+ def __init__(self):
+ super().__init__()
+ self.init_ui()
+
+ def init_ui(self):
+ """Initialize the main analytics interface"""
+ layout = QVBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+
+ # Create the clean analytics widget
+ self.analytics_widget = CleanAnalyticsWidget()
+ layout.addWidget(self.analytics_widget)
+
+ @Slot(dict)
+ def update_analytics(self, analytics):
+ """Update analytics with new data"""
+ # Forward to the analytics widget
+ if hasattr(self.analytics_widget, 'update_detection_data'):
+ self.analytics_widget.update_detection_data(analytics)
+
+ @Slot(dict)
+ def update_detection_data(self, detection_data):
+ """Update detection data from video tab"""
+ self.analytics_widget.update_detection_data(detection_data)
+
+ @Slot(dict)
+ def update_violation_data(self, violation_data):
+ """Update violation data"""
+ self.analytics_widget.update_violation_data(violation_data)
+
+ @Slot(dict)
+ def update_smart_intersection_analytics(self, analytics_data):
+ """Update smart intersection analytics"""
+ # Extract relevant data and forward
+ if 'detections' in analytics_data:
+ self.analytics_widget.update_detection_data(analytics_data)
+ if 'violations' in analytics_data:
+ for violation in analytics_data['violations']:
+ self.analytics_widget.update_violation_data(violation)
diff --git a/qt_app_pyside1/ui/config_panel.py b/qt_app_pyside1/ui/config_panel.py
index 8563410..5080dec 100644
--- a/qt_app_pyside1/ui/config_panel.py
+++ b/qt_app_pyside1/ui/config_panel.py
@@ -2,11 +2,14 @@ from PySide6.QtWidgets import (
QWidget, QVBoxLayout, QHBoxLayout, QLabel, QComboBox,
QSlider, QCheckBox, QPushButton, QGroupBox, QFormLayout,
QSpinBox, QDoubleSpinBox, QTabWidget, QLineEdit, QFileDialog,
- QSpacerItem, QSizePolicy
+ QSpacerItem, QSizePolicy, QScrollArea
)
from PySide6.QtCore import Qt, Signal, Slot
from PySide6.QtGui import QFont
+# Import VLM insights widget
+from ui.vlm_insights_widget import VLMInsightsWidget
+
class ConfigPanel(QWidget):
"""Side panel for application configuration."""
@@ -362,10 +365,62 @@ class ConfigPanel(QWidget):
violation_layout.addWidget(violation_group)
+ # === VLM Insights Tab ===
+ vlm_tab = QWidget()
+ vlm_layout = QVBoxLayout(vlm_tab)
+
+ # Create scroll area for VLM insights
+ vlm_scroll = QScrollArea()
+ vlm_scroll.setWidgetResizable(True)
+ vlm_scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
+ vlm_scroll.setStyleSheet("""
+ QScrollArea {
+ border: none;
+ background: transparent;
+ }
+ """)
+
+ # Add VLM insights widget
+ print("[CONFIG PANEL DEBUG] Creating VLM insights widget...")
+ self.vlm_insights = VLMInsightsWidget()
+ print("[CONFIG PANEL DEBUG] VLM insights widget created successfully")
+ vlm_scroll.setWidget(self.vlm_insights)
+ vlm_layout.addWidget(vlm_scroll)
+
+ # Smart Intersection Tab - Scene Analytics
+ smart_intersection_tab = QWidget()
+ si_layout = QVBoxLayout(smart_intersection_tab)
+
+ # Smart Intersection config widget
+ si_scroll = QScrollArea()
+ si_scroll.setWidgetResizable(True)
+ si_scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
+ si_scroll.setStyleSheet("""
+ QScrollArea {
+ border: none;
+ background: transparent;
+ }
+ """)
+
+ try:
+ from ui.smart_intersection_config import SmartIntersectionConfigPanel
+ self.smart_intersection_config = SmartIntersectionConfigPanel()
+ si_scroll.setWidget(self.smart_intersection_config)
+ print("[CONFIG PANEL DEBUG] Smart Intersection config panel created successfully")
+ except Exception as e:
+ print(f"[CONFIG PANEL DEBUG] Error creating Smart Intersection config: {e}")
+ self.smart_intersection_config = None
+ si_scroll.setWidget(QLabel(f"Smart Intersection config unavailable: {e}"))
+
+ si_layout.addWidget(si_scroll)
+
# Add all tabs
tabs.addTab(detection_tab, "Detection")
tabs.addTab(display_tab, "Display")
tabs.addTab(violation_tab, "Violations")
+ tabs.addTab(vlm_tab, "🤖 AI Insights") # Add VLM insights tab
+ tabs.addTab(smart_intersection_tab, "🚦 Smart Intersection") # Add Smart Intersection tab
+ print("[CONFIG PANEL DEBUG] Added AI Insights and Smart Intersection tabs to config panel")
layout.addWidget(tabs)
diff --git a/qt_app_pyside1/ui/main_window.py b/qt_app_pyside1/ui/main_window.py
index d05cdd8..3167a16 100644
--- a/qt_app_pyside1/ui/main_window.py
+++ b/qt_app_pyside1/ui/main_window.py
@@ -1,9 +1,9 @@
from PySide6.QtWidgets import (
QMainWindow, QTabWidget, QDockWidget, QMessageBox,
- QApplication, QFileDialog, QSplashScreen, QVBoxLayout, QWidget
+ QApplication, QFileDialog, QSplashScreen, QVBoxLayout, QWidget, QLabel
)
from PySide6.QtCore import Qt, QTimer, QSettings, QSize, Slot
-from PySide6.QtGui import QIcon, QPixmap, QAction
+from PySide6.QtGui import QIcon, QPixmap, QAction, QFont
import os
import sys
+import time  # timestamps for the analytics/performance forwarding added below
@@ -24,19 +24,26 @@ if hasattr(Qt, 'qInstallMessageHandler'):
from ui.analytics_tab import AnalyticsTab
from ui.violations_tab import ViolationsTab
from ui.export_tab import ExportTab
-from ui.config_panel import ConfigPanel
-from ui.live_multi_cam_tab import LiveMultiCamTab
-from ui.video_detection_tab import VideoDetectionTab
+from ui.modern_config_panel import ModernConfigPanel
+from ui.modern_live_detection_tab import ModernLiveDetectionTab
+# from ui.video_analysis_tab import VideoAnalysisTab
+# from ui.video_detection_tab import VideoDetectionTab # Commented out - split into two separate tabs
+from ui.video_detection_only_tab import VideoDetectionOnlyTab
+from ui.smart_intersection_tab import SmartIntersectionTab
from ui.global_status_panel import GlobalStatusPanel
+from ui.vlm_insights_widget import VLMInsightsWidget # Import the new VLM Insights Widget
+from ui.dashboard_tab import DashboardTab # Import the new Dashboard Tab
# Import controllers
from controllers.video_controller_new import VideoController
from controllers.analytics_controller import AnalyticsController
from controllers.performance_overlay import PerformanceOverlay
from controllers.model_manager import ModelManager
+# VLM controller is no longer imported here - it is created on demand from controllers.vlm_controller_new; VLM UI lives in the insights widget
# Import utilities
from utils.helpers import load_configuration, save_configuration, save_snapshot
+from utils.data_publisher import DataPublisher
class MainWindow(QMainWindow):
"""Main application window."""
@@ -58,6 +65,9 @@ class MainWindow(QMainWindow):
# Connect signals and slots
self.connectSignals()
+ # Initialize config panel with current configuration
+ self.config_panel.set_config(self.config)
+
# Restore settings
self.restoreSettings()
@@ -70,49 +80,134 @@ class MainWindow(QMainWindow):
def setupUI(self):
"""Set up the user interface"""
# Window properties
- self.setWindowTitle("Traffic Monitoring System (OpenVINO PySide6)")
+ self.setWindowTitle("Traffic Intersection Monitoring System")
self.setMinimumSize(1200, 800)
self.resize(1400, 900)
# Set up central widget with tabs
self.tabs = QTabWidget()
+ # Style the tabs
+ self.tabs.setStyleSheet("""
+ QTabWidget::pane {
+ border: 1px solid #444;
+ background-color: #2b2b2b;
+ }
+ QTabBar::tab {
+ background-color: #3c3c3c;
+ color: white;
+ padding: 8px 16px;
+ margin: 2px;
+ border: 1px solid #555;
+ border-bottom: none;
+ border-radius: 4px 4px 0px 0px;
+ min-width: 120px;
+ }
+ QTabBar::tab:selected {
+ background-color: #0078d4;
+ border-color: #0078d4;
+ }
+ QTabBar::tab:hover {
+ background-color: #4a4a4a;
+ }
+ QTabBar::tab:!selected {
+ margin-top: 2px;
+ }
+ """)
+
# Create tabs
- self.live_tab = LiveMultiCamTab()
- self.video_detection_tab = VideoDetectionTab()
+ self.live_tab = ModernLiveDetectionTab()
+ # self.video_analysis_tab = VideoAnalysisTab()
+ # self.video_detection_tab = VideoDetectionTab() # Commented out - split into two separate tabs
+ self.video_detection_only_tab = VideoDetectionOnlyTab()
+ self.smart_intersection_tab = SmartIntersectionTab()
self.analytics_tab = AnalyticsTab()
self.violations_tab = ViolationsTab()
self.export_tab = ExportTab()
+ # Remove VLM tab - VLM functionality moved to settings panel
+ # self.vlm_tab = VLMTab() # Create the VLM tab
from ui.performance_graphs import PerformanceGraphsWidget
self.performance_tab = PerformanceGraphsWidget()
+ # Add Dashboard tab
+ try:
+ self.dashboard_tab = DashboardTab()
+ except Exception as e:
+ print(f"Warning: Could not create Dashboard tab: {e}")
+ self.dashboard_tab = None
+
+ # Add User Guide tab
+ try:
+ from ui.user_guide_tab import UserGuideTab
+ self.user_guide_tab = UserGuideTab()
+ except Exception as e:
+ print(f"Warning: Could not create User Guide tab: {e}")
+ self.user_guide_tab = None
+
# Add tabs to tab widget
self.tabs.addTab(self.live_tab, "Live Detection")
- self.tabs.addTab(self.video_detection_tab, "Video Detection")
- self.tabs.addTab(self.performance_tab, "🔥 Performance & Latency")
+ # self.tabs.addTab(self.video_analysis_tab, "Video Analysis")
+ # self.tabs.addTab(self.video_detection_tab, "Smart Intersection") # Commented out - split into two tabs
+ self.tabs.addTab(self.video_detection_only_tab, "Video Detection")
+ # self.tabs.addTab(self.smart_intersection_tab, "Smart Intersection") # Temporarily hidden
+ if self.dashboard_tab:
+ self.tabs.addTab(self.dashboard_tab, "Dashboard")
+ self.tabs.addTab(self.performance_tab, "Performance & Latency")
self.tabs.addTab(self.analytics_tab, "Analytics")
self.tabs.addTab(self.violations_tab, "Violations")
+ # VLM functionality moved to settings panel
+ # self.tabs.addTab(self.vlm_tab, "🔍 Vision AI") # Add VLM tab with icon
self.tabs.addTab(self.export_tab, "Export & Config")
+ # Add User Guide tab if available
+ if self.user_guide_tab:
+ self.tabs.addTab(self.user_guide_tab, "Help")
+
# Create config panel in dock widget
- self.config_panel = ConfigPanel()
+ self.config_panel = ModernConfigPanel()
dock = QDockWidget("Settings", self)
dock.setObjectName("SettingsDock") # Set object name to avoid warning
dock.setWidget(self.config_panel)
dock.setFeatures(QDockWidget.DockWidgetMovable | QDockWidget.DockWidgetClosable)
dock.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+
+ # Set minimum and preferred size for the dock widget
+ dock.setMinimumWidth(400)
+ dock.resize(450, 800) # Set preferred width and height
+
self.addDockWidget(Qt.RightDockWidgetArea, dock)
# Create status bar
self.statusBar().showMessage("Initializing...")
+
+ # Create main layout with header
main_layout = QVBoxLayout()
+
+ # Add header title above tabs
+ header_label = QLabel("Traffic Intersection Monitoring System")
+ header_label.setAlignment(Qt.AlignCenter)
+ header_font = QFont()
+ header_font.setPointSize(14)
+ header_font.setBold(True)
+ header_label.setFont(header_font)
+ header_label.setStyleSheet("""
+ QLabel {
+ color: #ffffff;
+ background-color: #2b2b2b;
+ padding: 10px;
+ border-bottom: 2px solid #0078d4;
+ margin-bottom: 5px;
+ }
+ """)
+ main_layout.addWidget(header_label)
+
main_layout.addWidget(self.tabs)
central = QWidget()
central.setLayout(main_layout)
self.setCentralWidget(central)
- # Create menu bar
- self.setupMenus()
+ # Create menu bar - commented out for cleaner interface
+ # self.setupMenus()
# Create performance overlay
self.performance_overlay = PerformanceOverlay()
@@ -131,6 +226,17 @@ class MainWindow(QMainWindow):
# Create analytics controller
self.analytics_controller = AnalyticsController()
+
+ # Initialize data publisher for InfluxDB
+ print("[MAIN WINDOW DEBUG] Initializing Data Publisher...")
+ self.data_publisher = DataPublisher(self.config_file)
+ print("[MAIN WINDOW DEBUG] Data Publisher initialized successfully")
+
+ # VLM controller - using only local VLM folder, no backend
+ print("[MAIN WINDOW DEBUG] Initializing VLM Controller with local VLM folder...")
+ from controllers.vlm_controller_new import VLMController
+ self.vlm_controller = VLMController() # No backend URL needed
+ print("[MAIN WINDOW DEBUG] VLM Controller initialized successfully")
# Setup update timer for performance overlay
self.perf_timer = QTimer()
@@ -138,11 +244,56 @@ class MainWindow(QMainWindow):
self.perf_timer.start(1000) # Update every second
# Connect video_file_controller outputs to video_detection_tab
- self.video_file_controller.frame_ready.connect(self.video_detection_tab.update_display, Qt.QueuedConnection)
- self.video_file_controller.stats_ready.connect(self.video_detection_tab.update_stats, Qt.QueuedConnection)
- self.video_file_controller.progress_ready.connect(lambda value, max_value, timestamp: self.video_detection_tab.update_progress(value, max_value, timestamp), Qt.QueuedConnection)
+ # Connect video file controller signals to both video tabs
+ self.video_file_controller.frame_ready.connect(self.video_detection_only_tab.update_display, Qt.QueuedConnection)
+ self.video_file_controller.stats_ready.connect(self.video_detection_only_tab.update_stats, Qt.QueuedConnection)
+ self.video_file_controller.progress_ready.connect(lambda value, max_value, timestamp: self.video_detection_only_tab.update_progress(value, max_value, timestamp), Qt.QueuedConnection)
+
+ self.video_file_controller.frame_ready.connect(self.smart_intersection_tab.update_display, Qt.QueuedConnection)
+ self.video_file_controller.stats_ready.connect(self.smart_intersection_tab.update_stats, Qt.QueuedConnection)
+ self.video_file_controller.progress_ready.connect(lambda value, max_value, timestamp: self.smart_intersection_tab.update_progress(value, max_value, timestamp), Qt.QueuedConnection)
+
+ # Connect video frames to VLM insights for analysis
+ if hasattr(self.video_file_controller, 'raw_frame_ready'):
+ print("[MAIN WINDOW DEBUG] Connecting raw_frame_ready signal to VLM insights")
+ self.video_file_controller.raw_frame_ready.connect(
+ self._forward_frame_to_vlm, Qt.QueuedConnection
+ )
+ print("[MAIN WINDOW DEBUG] raw_frame_ready signal connected to VLM insights")
+
+ # Also connect to analytics tab
+ print("[MAIN WINDOW DEBUG] Connecting raw_frame_ready signal to analytics tab")
+ self.video_file_controller.raw_frame_ready.connect(
+ self._forward_frame_to_analytics, Qt.QueuedConnection
+ )
+ print("[MAIN WINDOW DEBUG] raw_frame_ready signal connected to analytics tab")
+ else:
+ print("[MAIN WINDOW DEBUG] raw_frame_ready signal not found in video_file_controller")
# Connect auto model/device selection signal
- self.video_detection_tab.auto_select_model_device.connect(self.video_file_controller.auto_select_model_device, Qt.QueuedConnection)
+ # Connect video tab auto-select signals
+ self.video_detection_only_tab.auto_select_model_device.connect(self.video_file_controller.auto_select_model_device, Qt.QueuedConnection)
+ self.smart_intersection_tab.auto_select_model_device.connect(self.video_file_controller.auto_select_model_device, Qt.QueuedConnection)
+
+        # Connect VLM insights analysis requests; the handler uses the VLM controller when
+        # available and falls back to a mock analysis otherwise
+ print("[MAIN WINDOW DEBUG] Checking for VLM insights widget...")
+ if hasattr(self.config_panel, 'vlm_insights_widget'):
+ print("[MAIN WINDOW DEBUG] VLM insights widget found, connecting signals...")
+ self.config_panel.vlm_insights_widget.analyze_frame_requested.connect(self._handle_vlm_analysis, Qt.QueuedConnection)
+ print("[MAIN WINDOW DEBUG] VLM insights analysis signal connected")
+
+ # Connect pause state signal from video file controller to VLM insights
+ if hasattr(self.video_file_controller, 'pause_state_changed'):
+ self.video_file_controller.pause_state_changed.connect(self.config_panel.vlm_insights_widget.on_video_paused, Qt.QueuedConnection)
+ print("[MAIN WINDOW DEBUG] VLM insights pause state signal connected")
+ else:
+ print("[MAIN WINDOW DEBUG] pause_state_changed signal not found in video_file_controller")
+ else:
+ print("[MAIN WINDOW DEBUG] VLM insights widget NOT found in config panel")
+
+ # Old VLM tab connections removed - functionality moved to insights widget
+ # self.vlm_tab.process_image_requested.connect(self.vlm_controller.process_image, Qt.QueuedConnection)
+ # self.video_controller.frame_np_ready.connect(self.vlm_tab.set_frame, Qt.QueuedConnection)
+ # self.video_file_controller.frame_np_ready.connect(self.vlm_tab.set_frame, Qt.QueuedConnection)
except Exception as e:
QMessageBox.critical(
self,
@@ -150,6 +301,7 @@ class MainWindow(QMainWindow):
f"Error initializing controllers: {str(e)}"
)
print(f"Error details: {e}")
+            import traceback  # local import in case traceback is not imported at module level
+            traceback.print_exc()
def connectSignals(self):
@@ -212,14 +364,46 @@ class MainWindow(QMainWindow):
self.export_tab.reload_config_btn.clicked.connect(self.load_config)
self.export_tab.export_btn.clicked.connect(self.export_data)
- # Video Detection tab connections
- self.video_detection_tab.file_selected.connect(self._handle_video_file_selected)
- self.video_detection_tab.play_clicked.connect(self._handle_video_play)
- self.video_detection_tab.pause_clicked.connect(self._handle_video_pause)
- self.video_detection_tab.stop_clicked.connect(self._handle_video_stop)
- self.video_detection_tab.detection_toggled.connect(self._handle_video_detection_toggle)
- self.video_detection_tab.screenshot_clicked.connect(self._handle_video_screenshot)
- self.video_detection_tab.seek_changed.connect(self._handle_video_seek)
+ # Video Detection tab connections (standard tab)
+ self.video_detection_only_tab.file_selected.connect(self._handle_video_file_selected)
+ self.video_detection_only_tab.play_clicked.connect(self._handle_video_play)
+ self.video_detection_only_tab.pause_clicked.connect(self._handle_video_pause)
+ self.video_detection_only_tab.stop_clicked.connect(self._handle_video_stop)
+ self.video_detection_only_tab.detection_toggled.connect(self._handle_video_detection_toggle)
+ self.video_detection_only_tab.screenshot_clicked.connect(self._handle_video_screenshot)
+ self.video_detection_only_tab.seek_changed.connect(self._handle_video_seek)
+
+ # Smart Intersection tab connections
+ self.smart_intersection_tab.file_selected.connect(self._handle_video_file_selected)
+ self.smart_intersection_tab.play_clicked.connect(self._handle_video_play)
+ self.smart_intersection_tab.pause_clicked.connect(self._handle_video_pause)
+ self.smart_intersection_tab.stop_clicked.connect(self._handle_video_stop)
+ self.smart_intersection_tab.detection_toggled.connect(self._handle_video_detection_toggle)
+ self.smart_intersection_tab.screenshot_clicked.connect(self._handle_video_screenshot)
+ self.smart_intersection_tab.seek_changed.connect(self._handle_video_seek)
+
+ # Smart Intersection specific connections
+ self.smart_intersection_tab.smart_intersection_enabled.connect(self._handle_smart_intersection_enabled)
+ self.smart_intersection_tab.multi_camera_mode_enabled.connect(self._handle_multi_camera_mode)
+ self.smart_intersection_tab.roi_configuration_changed.connect(self._handle_roi_configuration_changed)
+ self.smart_intersection_tab.scene_analytics_toggled.connect(self._handle_scene_analytics_toggle)
+
+ # Connect smart intersection controller if available
+ try:
+ from controllers.smart_intersection_controller import SmartIntersectionController
+ self.smart_intersection_controller = SmartIntersectionController()
+
+ # Connect scene analytics signals
+ self.video_file_controller.frame_np_ready.connect(
+ self.smart_intersection_controller.process_frame, Qt.QueuedConnection
+ )
+ self.smart_intersection_controller.scene_analytics_ready.connect(
+ self._handle_scene_analytics_update, Qt.QueuedConnection
+ )
+ print("✅ Smart Intersection Controller connected")
+ except Exception as e:
+ print(f"⚠️ Smart Intersection Controller not available: {e}")
+ self.smart_intersection_controller = None
# Connect OpenVINO device info signal to config panel from BOTH controllers
self.video_controller.device_info_ready.connect(self.config_panel.update_devices_info, Qt.QueuedConnection)
@@ -227,7 +411,57 @@ class MainWindow(QMainWindow):
# After connecting video_file_controller and video_detection_tab, trigger auto model/device update
QTimer.singleShot(0, self.video_file_controller.auto_select_model_device.emit)
+
+ # Connect performance statistics from both controllers
self.video_controller.performance_stats_ready.connect(self.update_performance_graphs)
+ self.video_file_controller.performance_stats_ready.connect(self.update_performance_graphs)
+
+ # Connect enhanced performance tab signals
+ if hasattr(self, 'performance_tab'):
+ try:
+ # Connect performance tab signals for better integration
+ self.performance_tab.spike_detected.connect(self.handle_performance_spike)
+ self.performance_tab.device_switched.connect(self.handle_device_switch_notification)
+ self.performance_tab.performance_data_updated.connect(self.handle_performance_data_update)
+ print("✅ Performance tab signals connected successfully")
+ except Exception as e:
+ print(f"⚠️ Could not connect performance tab signals: {e}")
+
+ @Slot(dict)
+ def handle_performance_spike(self, spike_data):
+ """Handle performance spike detection"""
+ try:
+ latency = spike_data.get('latency', 0)
+ device = spike_data.get('device', 'Unknown')
+ print(f"🚨 Performance spike detected: {latency:.1f}ms on {device}")
+
+ # Optionally show notification or log to analytics
+ if hasattr(self, 'analytics_tab'):
+ # Could add spike to analytics if needed
+ pass
+
+ except Exception as e:
+ print(f"❌ Error handling performance spike: {e}")
+
+ @Slot(str)
+ def handle_device_switch_notification(self, device):
+ """Handle device switch notification"""
+ try:
+ print(f"🔄 Device switched to: {device}")
+ # Could update UI elements or show notification
+ except Exception as e:
+ print(f"❌ Error handling device switch notification: {e}")
+
+ @Slot(dict)
+ def handle_performance_data_update(self, performance_data):
+ """Handle performance data updates for other components"""
+ try:
+ # Could forward to other tabs or components that need performance data
+ if hasattr(self, 'analytics_tab'):
+ # Forward performance data to analytics if needed
+ pass
+ except Exception as e:
+ print(f"❌ Error handling performance data update: {e}")
def setupMenus(self):
"""Set up application menus"""
# File menu
@@ -284,16 +518,46 @@ class MainWindow(QMainWindow):
if not config:
return
- # Update config
- for section in config:
- if section in self.config:
- self.config[section].update(config[section])
- else:
- self.config[section] = config[section]
+ # Convert flat config to nested structure for model manager
+ nested_config = {
+ "detection": {}
+ }
- # Update model manager
+ # Map config panel values to model manager format
+ if 'device' in config:
+ nested_config["detection"]["device"] = config['device']
+ if 'model' in config:
+ # Convert YOLOv11x format to yolo11x format for model manager
+ model_name = config['model'].lower()
+ if 'yolov11' in model_name:
+ model_name = model_name.replace('yolov11', 'yolo11')
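+                # e.g. a panel value of 'YOLOv11x' becomes 'yolo11x'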
+            # 'auto' is passed through to the model manager unchanged
+ nested_config["detection"]["model"] = model_name
+ if 'confidence_threshold' in config:
+ nested_config["detection"]["confidence_threshold"] = config['confidence_threshold']
+ if 'iou_threshold' in config:
+ nested_config["detection"]["iou_threshold"] = config['iou_threshold']
+
+ print(f"🔧 Main Window: Applying config to model manager: {nested_config}")
+ print(f"🔧 Main Window: Received config from panel: {config}")
+
+ # Update config
+ for section in nested_config:
+ if section in self.config:
+ self.config[section].update(nested_config[section])
+ else:
+ self.config[section] = nested_config[section]
+
+ # Update model manager with nested config
if self.model_manager:
- self.model_manager.update_config(self.config)
+ self.model_manager.update_config(nested_config)
+
+ # Refresh model information in video controllers
+ if hasattr(self, 'video_controller') and self.video_controller:
+ self.video_controller.refresh_model_info()
+ if hasattr(self, 'video_file_controller') and self.video_file_controller:
+ self.video_file_controller.refresh_model_info()
# Save config to file
save_configuration(self.config, self.config_file)
@@ -302,7 +566,9 @@ class MainWindow(QMainWindow):
self.export_tab.update_config_display(self.config)
# Update status
- self.statusBar().showMessage("Configuration applied", 2000)
+ device = config.get('device', 'Unknown')
+ model = config.get('model', 'Unknown')
+ self.statusBar().showMessage(f"Configuration applied - Device: {device}, Model: {model}", 3000)
@Slot()
def load_config(self):
@@ -642,6 +908,7 @@ class MainWindow(QMainWindow):
confidence_str = f" (Confidence: {confidence:.2f})" if confidence > 0 else ""
else:
traffic_light_color = traffic_light_info
+ confidence = 1.0
confidence_str = ""
if traffic_light_color != 'unknown':
@@ -653,6 +920,16 @@ class MainWindow(QMainWindow):
else:
color_text = str(traffic_light_color).upper()
self.statusBar().showMessage(f"Traffic Light: {color_text}{confidence_str}")
+
+ # Publish traffic light status to InfluxDB
+ if hasattr(self, 'data_publisher') and self.data_publisher:
+ try:
+ color_for_publishing = traffic_light_color
+ if isinstance(traffic_light_color, dict):
+ color_for_publishing = traffic_light_color.get("color", "unknown")
+ self.data_publisher.publish_traffic_light_status(color_for_publishing, confidence)
+ except Exception as e:
+ print(f"❌ Error publishing traffic light status: {e}")
@Slot(dict)
def handle_violation_detected(self, violation):
"""Handle a detected traffic violation"""
@@ -663,9 +940,28 @@ class MainWindow(QMainWindow):
# Add to violations tab
self.violations_tab.add_violation(violation)
+ # Update analytics tab with violation data
+ if hasattr(self.analytics_tab, 'update_violation_data'):
+ self.analytics_tab.update_violation_data(violation)
+ print(f"[ANALYTICS DEBUG] Violation data forwarded to analytics tab")
+
# Update analytics
if self.analytics_controller:
self.analytics_controller.register_violation(violation)
+
+ # Publish violation to InfluxDB
+ if hasattr(self, 'data_publisher') and self.data_publisher:
+ try:
+ violation_type = violation.get('type', 'red_light_violation')
+ vehicle_id = violation.get('track_id', 'unknown')
+ details = {
+ 'timestamp': violation.get('timestamp', ''),
+ 'confidence': violation.get('confidence', 1.0),
+ 'location': violation.get('location', 'crosswalk')
+ }
+ self.data_publisher.publish_violation_event(violation_type, vehicle_id, details)
+ except Exception as e:
+ print(f"❌ Error publishing violation event: {e}")
print(f"🚨 Violation processed: {violation['id']} at {violation['timestamp']}")
except Exception as e:
@@ -678,10 +974,29 @@ class MainWindow(QMainWindow):
self.video_file_controller.set_source(file_path)
def _handle_video_play(self):
print("[VideoDetection] Play clicked")
- self.video_file_controller.play()
+ # Check if video is paused, if so resume, otherwise start
+ if hasattr(self.video_file_controller, '_paused') and self.video_file_controller._paused:
+ self.video_file_controller.resume()
+ else:
+ self.video_file_controller.play()
+ # Notify VLM insights that video is playing (not paused)
+ print("[MAIN WINDOW DEBUG] Notifying VLM insights: video playing")
+ if hasattr(self, 'config_panel') and hasattr(self.config_panel, 'vlm_insights_widget'):
+ self.config_panel.vlm_insights_widget.on_video_paused(False)
+ print("[MAIN WINDOW DEBUG] VLM insights notified: not paused")
+ else:
+ print("[MAIN WINDOW DEBUG] VLM insights not found for play notification")
+
def _handle_video_pause(self):
print("[VideoDetection] Pause clicked")
self.video_file_controller.pause()
+ # Notify VLM insights that video is paused
+ print("[MAIN WINDOW DEBUG] Notifying VLM insights: video paused")
+ if hasattr(self, 'config_panel') and hasattr(self.config_panel, 'vlm_insights_widget'):
+ self.config_panel.vlm_insights_widget.on_video_paused(True)
+ print("[MAIN WINDOW DEBUG] VLM insights notified: paused")
+ else:
+ print("[MAIN WINDOW DEBUG] VLM insights not found for pause notification")
def _handle_video_stop(self):
print("[VideoDetection] Stop clicked")
self.video_file_controller.stop()
@@ -727,24 +1042,368 @@ class MainWindow(QMainWindow):
self.statusBar().showMessage(f"Error switching device: {e}", 3000)
@Slot(dict)
def update_performance_graphs(self, stats):
- """Update the performance graphs using the new robust widget logic."""
+ """Update the performance graphs using the enhanced widget logic."""
if not hasattr(self, 'performance_tab'):
return
print(f"[PERF DEBUG] update_performance_graphs called with: {stats}")
+
+ # Publish performance data to InfluxDB
+ if hasattr(self, 'data_publisher') and self.data_publisher:
+ try:
+ fps = stats.get('fps', 0)
+ inference_time = stats.get('inference_time', 0)
+ cpu_usage = stats.get('cpu_usage', None)
+ gpu_usage = stats.get('gpu_usage', None)
+
+ self.data_publisher.publish_performance_data(fps, inference_time, cpu_usage, gpu_usage)
+
+                # Publish device info periodically (roughly every 10th stats update)
+ if hasattr(self, '_device_info_counter'):
+ self._device_info_counter += 1
+ else:
+ self._device_info_counter = 1
+
+ if self._device_info_counter % 10 == 0:
+ self.data_publisher.publish_device_info()
+ except Exception as e:
+ print(f"❌ Error publishing performance data: {e}")
+
+ # Enhanced analytics data with proper structure
+ current_time = time.time()
analytics_data = {
'real_time_data': {
- 'timestamps': [stats.get('frame_idx', 0)],
+ 'timestamps': [current_time],
'inference_latency': [stats.get('inference_time', 0)],
'fps': [stats.get('fps', 0)],
'device_usage': [1 if stats.get('device', 'CPU') == 'GPU' else 0],
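+            # 'resolution' is assumed to arrive as a 'WIDTHxHEIGHT' string; 640x360 is the fallback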
'resolution_width': [int(stats.get('resolution', '640x360').split('x')[0]) if 'x' in stats.get('resolution', '') else 640],
'resolution_height': [int(stats.get('resolution', '640x360').split('x')[1]) if 'x' in stats.get('resolution', '') else 360],
- 'device_switches': [0] if stats.get('is_device_switch', False) else [],
- 'resolution_changes': [0] if stats.get('is_res_change', False) else [],
},
- 'latency_statistics': {},
- 'current_metrics': {},
- 'system_metrics': {},
+ 'latency_statistics': {
+ 'avg': stats.get('avg_inference_time', 0),
+ 'max': stats.get('max_inference_time', 0),
+ 'min': stats.get('min_inference_time', 0),
+ 'spike_count': stats.get('spike_count', 0)
+ },
+ 'current_metrics': {
+ 'device': stats.get('device', 'CPU'),
+ 'resolution': stats.get('resolution', 'Unknown'),
+ 'model': stats.get('model_name', stats.get('model', 'Unknown')), # Try model_name first, then model
+ 'fps': stats.get('fps', 0),
+ 'inference_time': stats.get('inference_time', 0)
+ },
+ 'system_metrics': {
+ 'cpu_usage': stats.get('cpu_usage', 0),
+ 'gpu_usage': stats.get('gpu_usage', 0),
+ 'memory_usage': stats.get('memory_usage', 0)
+ }
}
- print(f"[PERF DEBUG] analytics_data for update_performance_data: {analytics_data}")
+
+ print(f"[PERF DEBUG] Enhanced analytics_data: {analytics_data}")
+
+ # Update performance graphs with enhanced data
self.performance_tab.update_performance_data(analytics_data)
+
+ def _handle_vlm_analysis(self, frame, prompt):
+ """Handle VLM analysis requests."""
+ print(f"[MAIN WINDOW DEBUG] _handle_vlm_analysis called")
+ print(f"[MAIN WINDOW DEBUG] Frame type: {type(frame)}, shape: {frame.shape if hasattr(frame, 'shape') else 'N/A'}")
+ print(f"[MAIN WINDOW DEBUG] Prompt: '{prompt}'")
+
+ try:
+ # Check if VLM controller is available
+ if hasattr(self, 'vlm_controller') and self.vlm_controller:
+ print(f"[MAIN WINDOW DEBUG] Using VLM controller for analysis")
+
+ # Connect VLM result to insights widget if not already connected
+ if not hasattr(self, '_vlm_connected'):
+ print(f"[MAIN WINDOW DEBUG] Connecting VLM controller results to insights widget")
+ self.vlm_controller.result_ready.connect(
+ lambda result: self._handle_vlm_result(result),
+ Qt.QueuedConnection
+ )
+ self._vlm_connected = True
+
+ # Process image with VLM controller
+ self.vlm_controller.process_image(frame, prompt)
+ print(f"[MAIN WINDOW DEBUG] VLM controller processing started")
+
+ else:
+ print(f"[MAIN WINDOW DEBUG] VLM controller not available, using mock analysis")
+ # Fallback to mock analysis
+ result = self._generate_mock_analysis(frame, prompt)
+ print(f"[MAIN WINDOW DEBUG] Mock analysis generated: {len(result)} characters")
+
+ # Send result back to VLM insights widget
+ if hasattr(self.config_panel, 'vlm_insights_widget'):
+ print(f"[MAIN WINDOW DEBUG] Sending mock result to VLM insights widget")
+ self.config_panel.vlm_insights_widget.on_analysis_result(result)
+ print(f"[MAIN WINDOW DEBUG] Mock result sent successfully")
+ else:
+ print(f"[MAIN WINDOW DEBUG] VLM insights widget not found")
+
+ except Exception as e:
+ print(f"[VLM ERROR] Error in analysis: {e}")
+ if hasattr(self.config_panel, 'vlm_insights_widget'):
+ self.config_panel.vlm_insights_widget.on_analysis_result(f"Analysis error: {str(e)}")
+
+ def _handle_vlm_result(self, result):
+ """Handle VLM controller results."""
+ print(f"[MAIN WINDOW DEBUG] _handle_vlm_result called")
+ print(f"[MAIN WINDOW DEBUG] Result type: {type(result)}")
+
+ try:
+ # Extract answer from result dict
+ if isinstance(result, dict):
+ if 'response' in result:
+ answer = result['response']
+ print(f"[MAIN WINDOW DEBUG] Extracted response: {len(str(answer))} characters")
+ elif 'answer' in result:
+ answer = result['answer']
+ print(f"[MAIN WINDOW DEBUG] Extracted answer: {len(str(answer))} characters")
+ else:
+ answer = str(result)
+ print(f"[MAIN WINDOW DEBUG] Using result as string: {len(answer)} characters")
+ else:
+ answer = str(result)
+ print(f"[MAIN WINDOW DEBUG] Using result as string: {len(answer)} characters")
+
+ # Send result to VLM insights widget
+ if hasattr(self.config_panel, 'vlm_insights_widget'):
+ print(f"[MAIN WINDOW DEBUG] Sending VLM result to insights widget")
+ self.config_panel.vlm_insights_widget.on_analysis_result(answer)
+ print(f"[MAIN WINDOW DEBUG] VLM result sent successfully")
+ else:
+ print(f"[MAIN WINDOW DEBUG] VLM insights widget not found")
+
+ except Exception as e:
+ print(f"[VLM ERROR] Error handling VLM result: {e}")
+
+ def _forward_frame_to_vlm(self, frame, detections, fps):
+ """Forward frame to VLM insights widget."""
+ print(f"[MAIN WINDOW DEBUG] _forward_frame_to_vlm called")
+ print(f"[MAIN WINDOW DEBUG] Frame type: {type(frame)}, shape: {frame.shape if hasattr(frame, 'shape') else 'N/A'}")
+ print(f"[MAIN WINDOW DEBUG] Detections count: {len(detections) if detections else 0}")
+ print(f"[MAIN WINDOW DEBUG] FPS: {fps}")
+
+ # Publish detection events to InfluxDB
+ if hasattr(self, 'data_publisher') and self.data_publisher and detections:
+ try:
+ # Count vehicles and pedestrians
+ vehicle_count = 0
+ pedestrian_count = 0
+
+ for detection in detections:
+ label = ""
+ if isinstance(detection, dict):
+ label = detection.get('label', '').lower()
+ elif hasattr(detection, 'label'):
+ label = getattr(detection, 'label', '').lower()
+ elif hasattr(detection, 'class_name'):
+ label = getattr(detection, 'class_name', '').lower()
+ elif hasattr(detection, 'cls'):
+ label = str(getattr(detection, 'cls', '')).lower()
+
+ # Debug the label detection
+ if label and label != 'traffic light':
+ print(f"[PUBLISHER DEBUG] Detected object: {label}")
+
+ if label in ['car', 'truck', 'bus', 'motorcycle', 'vehicle']:
+ vehicle_count += 1
+ elif label in ['person', 'pedestrian']:
+ pedestrian_count += 1
+
+ # Also try to get vehicle count from tracked vehicles if available
+ if vehicle_count == 0 and hasattr(self, 'video_file_controller'):
+ try:
+ # Try to get vehicle count from current analysis data
+ analysis_data = getattr(self.video_file_controller, 'get_current_analysis_data', lambda: {})()
+ if isinstance(analysis_data, dict):
+ tracked_vehicles = analysis_data.get('tracked_vehicles', [])
+ if tracked_vehicles:
+ vehicle_count = len(tracked_vehicles)
+ print(f"[PUBLISHER DEBUG] Using tracked vehicle count: {vehicle_count}")
+                except Exception:
+                    # The analysis-data lookup is best-effort; ignore failures
+                    pass
+
+ self.data_publisher.publish_detection_events(vehicle_count, pedestrian_count)
+ except Exception as e:
+ print(f"❌ Error publishing detection events: {e}")
+
+ try:
+ if hasattr(self.config_panel, 'vlm_insights_widget'):
+ print(f"[MAIN WINDOW DEBUG] Forwarding frame to VLM insights widget")
+ self.config_panel.vlm_insights_widget.set_current_frame(frame)
+
+ # Store detection data for VLM analysis
+ if hasattr(self.config_panel.vlm_insights_widget, 'set_detection_data'):
+ print(f"[MAIN WINDOW DEBUG] Setting detection data for VLM")
+ detection_data = {
+ 'detections': detections,
+ 'fps': fps,
+ 'timestamp': time.time()
+ }
+ # Get additional data from video controller if available
+ if hasattr(self.video_file_controller, 'get_current_analysis_data'):
+ analysis_data = self.video_file_controller.get_current_analysis_data()
+ detection_data.update(analysis_data)
+
+ self.config_panel.vlm_insights_widget.set_detection_data(detection_data)
+ print(f"[MAIN WINDOW DEBUG] Detection data set successfully")
+
+ print(f"[MAIN WINDOW DEBUG] Frame forwarded successfully")
+ else:
+ print(f"[MAIN WINDOW DEBUG] VLM insights widget not found for frame forwarding")
+ except Exception as e:
+ print(f"[MAIN WINDOW DEBUG] Error forwarding frame to VLM: {e}")
+
+ def _forward_frame_to_analytics(self, frame, detections, fps):
+ """Forward frame data to analytics tab for real-time updates."""
+ try:
+ print(f"[ANALYTICS DEBUG] Forwarding frame data to analytics tab")
+ print(f"[ANALYTICS DEBUG] Detections count: {len(detections) if detections else 0}")
+
+ # Prepare detection data for analytics
+ detection_data = {
+ 'detections': detections,
+ 'fps': fps,
+ 'timestamp': time.time(),
+ 'frame_shape': frame.shape if hasattr(frame, 'shape') else None
+ }
+
+ # Get additional analysis data from video controller
+ if hasattr(self.video_file_controller, 'get_current_analysis_data'):
+ analysis_data = self.video_file_controller.get_current_analysis_data()
+ if analysis_data:
+ detection_data.update(analysis_data)
+ print(f"[ANALYTICS DEBUG] Updated with analysis data: {list(analysis_data.keys())}")
+
+ # Forward to analytics tab
+ if hasattr(self.analytics_tab, 'update_detection_data'):
+ self.analytics_tab.update_detection_data(detection_data)
+ print(f"[ANALYTICS DEBUG] Detection data forwarded to analytics tab successfully")
+ else:
+ print(f"[ANALYTICS DEBUG] Analytics tab update_detection_data method not found")
+
+ except Exception as e:
+ print(f"[ANALYTICS DEBUG] Error forwarding frame to analytics: {e}")
+ import traceback
+ traceback.print_exc()
+
+ def _generate_mock_analysis(self, frame, prompt):
+ """Generate a mock analysis response based on frame content and prompt."""
+ try:
+ # Analyze frame properties
+ h, w = frame.shape[:2] if frame is not None else (0, 0)
+
+ # Basic image analysis
+ analysis_parts = []
+
+ if "traffic" in prompt.lower():
+ analysis_parts.append("🚦 Traffic Analysis:")
+ analysis_parts.append(f"• Frame resolution: {w}x{h}")
+ analysis_parts.append("• Detected scene: Urban traffic intersection")
+ analysis_parts.append("• Visible elements: Road, potential vehicles")
+ analysis_parts.append("• Traffic flow appears to be moderate")
+
+ elif "safety" in prompt.lower():
+ analysis_parts.append("⚠️ Safety Assessment:")
+ analysis_parts.append("• Monitoring for traffic violations")
+ analysis_parts.append("• Checking lane discipline")
+ analysis_parts.append("• Observing traffic light compliance")
+ analysis_parts.append("• Overall safety level: Monitoring required")
+
+ else:
+ analysis_parts.append("🔍 General Analysis:")
+ analysis_parts.append(f"• Image dimensions: {w}x{h} pixels")
+ analysis_parts.append("• Scene type: Traffic monitoring view")
+ analysis_parts.append("• Quality: Processing frame for analysis")
+ analysis_parts.append(f"• Prompt: {prompt[:100]}...")
+
+ # Add timestamp and disclaimer
+ from datetime import datetime
+ timestamp = datetime.now().strftime("%H:%M:%S")
+ analysis_parts.append(f"\n📝 Analysis completed at {timestamp}")
+ analysis_parts.append("ℹ️ Note: This is a mock analysis. Full AI analysis requires compatible OpenVINO setup.")
+
+ return "\n".join(analysis_parts)
+
+ except Exception as e:
+ return f"Unable to analyze frame: {str(e)}"
+
+ # Smart Intersection Signal Handlers
+ @Slot(bool)
+ def _handle_smart_intersection_enabled(self, enabled):
+ """Handle smart intersection mode toggle"""
+ print(f"🚦 Smart Intersection mode {'enabled' if enabled else 'disabled'}")
+
+ if self.smart_intersection_controller:
+ self.smart_intersection_controller.set_enabled(enabled)
+
+ # Update status
+ if enabled:
+ self.statusBar().showMessage("Smart Intersection mode activated")
+ else:
+ self.statusBar().showMessage("Standard detection mode")
+
+ @Slot(bool)
+ def _handle_multi_camera_mode(self, enabled):
+ """Handle multi-camera mode toggle"""
+ print(f"📹 Multi-camera mode {'enabled' if enabled else 'disabled'}")
+
+ if self.smart_intersection_controller:
+ self.smart_intersection_controller.set_multi_camera_mode(enabled)
+
+ @Slot(dict)
+ def _handle_roi_configuration_changed(self, roi_config):
+ """Handle ROI configuration changes"""
+ print(f"🎯 ROI configuration updated: {len(roi_config.get('rois', []))} regions")
+
+ if self.smart_intersection_controller:
+ self.smart_intersection_controller.update_roi_config(roi_config)
+
+ @Slot(bool)
+ def _handle_scene_analytics_toggle(self, enabled):
+ """Handle scene analytics toggle"""
+ print(f"📊 Scene analytics {'enabled' if enabled else 'disabled'}")
+
+ if self.smart_intersection_controller:
+ self.smart_intersection_controller.set_scene_analytics(enabled)
+
+ @Slot(dict)
+ def _handle_scene_analytics_update(self, analytics_data):
+ """Handle scene analytics data updates"""
+ try:
+ # Update video detection tab with smart intersection data
+ smart_stats = {
+ 'total_objects': analytics_data.get('total_objects', 0),
+ 'active_tracks': analytics_data.get('active_tracks', 0),
+ 'roi_events': analytics_data.get('roi_events', 0),
+ 'crosswalk_events': analytics_data.get('crosswalk_events', 0),
+ 'lane_events': analytics_data.get('lane_events', 0),
+ 'safety_events': analytics_data.get('safety_events', 0),
+ 'north_objects': analytics_data.get('camera_stats', {}).get('north', 0),
+ 'east_objects': analytics_data.get('camera_stats', {}).get('east', 0),
+ 'south_objects': analytics_data.get('camera_stats', {}).get('south', 0),
+ 'west_objects': analytics_data.get('camera_stats', {}).get('west', 0),
+ 'fps': analytics_data.get('fps', 0),
+ 'processing_time': analytics_data.get('processing_time_ms', 0),
+ 'gpu_usage': analytics_data.get('gpu_usage', 0),
+ 'memory_usage': analytics_data.get('memory_usage', 0)
+ }
+
+ # Update both video tabs with stats
+ self.video_detection_only_tab.update_stats(smart_stats)
+ self.smart_intersection_tab.update_stats(smart_stats)
+
+ # Update analytics tab if it has smart intersection support
+ if hasattr(self.analytics_tab, 'update_smart_intersection_analytics'):
+ self.analytics_tab.update_smart_intersection_analytics(analytics_data)
+
+ except Exception as e:
+ print(f"Error handling scene analytics update: {e}")
diff --git a/qt_app_pyside1/ui/performance_graphs.py b/qt_app_pyside1/ui/performance_graphs.py
index 6d18167..08369c3 100644
--- a/qt_app_pyside1/ui/performance_graphs.py
+++ b/qt_app_pyside1/ui/performance_graphs.py
@@ -5,16 +5,25 @@ Shows when latency spikes occur with different resolutions and devices
from PySide6.QtWidgets import (
QWidget, QVBoxLayout, QHBoxLayout, QLabel,
- QGroupBox, QTabWidget, QFrame, QSplitter
+ QGroupBox, QTabWidget, QFrame, QSplitter, QScrollArea
)
from PySide6.QtCore import Qt, QTimer, Signal, Slot
-from PySide6.QtGui import QPainter, QPen, QBrush, QColor, QFont
+from PySide6.QtGui import QPainter, QPen, QBrush, QColor, QFont, QLinearGradient
import numpy as np
from collections import deque
from typing import Dict, List, Any
+import time
+
+# Try to import psutil for system monitoring, use fallback if not available
+try:
+ import psutil
+ PSUTIL_AVAILABLE = True
+except ImportError:
+ PSUTIL_AVAILABLE = False
+ print("⚠️ psutil not available - system monitoring will use fallback values")
class RealTimeGraph(QWidget):
- """Custom widget for drawing real-time graphs"""
+ """Custom widget for drawing real-time graphs with enhanced styling"""
def __init__(self, title: str = "Graph", y_label: str = "Value", max_points: int = 300):
super().__init__()
@@ -29,20 +38,75 @@ class RealTimeGraph(QWidget):
self.device_markers = deque(maxlen=max_points) # Mark device changes
self.resolution_markers = deque(maxlen=max_points) # Mark resolution changes
- # Graph settings
- self.margin = 40
- self.grid_color = QColor(60, 60, 60)
- self.line_color = QColor(0, 255, 255) # Cyan
- self.spike_color = QColor(255, 0, 0) # Red for spikes
- self.cpu_color = QColor(100, 150, 255) # Blue for CPU
- self.gpu_color = QColor(255, 150, 100) # Orange for GPU
+ # Enhanced styling colors
+ self.bg_color = QColor(18, 18, 18) # Very dark background
+ self.grid_color = QColor(40, 40, 45) # Subtle grid
+ self.line_color = QColor(0, 230, 255) # Bright cyan
+ self.spike_color = QColor(255, 77, 77) # Bright red for spikes
+ self.cpu_color = QColor(120, 180, 255) # Light blue for CPU
+ self.gpu_color = QColor(255, 165, 0) # Orange for GPU
+ self.text_color = QColor(220, 220, 220) # Light gray text
+ self.accent_color = QColor(255, 215, 0) # Gold accent
# Auto-scaling
self.y_min = 0
self.y_max = 100
self.auto_scale = True
+ # Performance counters
+ self.spike_count = 0
+ self.device_switches = 0
+ self.resolution_changes = 0
+
self.setMinimumSize(400, 200)
+ self.setStyleSheet("""
+ QWidget {
+ background-color: #121212;
+ border: 1px solid #2a2a2a;
+ border-radius: 8px;
+ }
+ """)
+
+ def add_data_point(self, x: float, y: float, is_spike: bool = False, device: str = "CPU", is_res_change: bool = False):
+ """Add a new data point to the graph"""
+ self.x_data.append(x)
+ self.y_data.append(y)
+ self.spike_markers.append(is_spike)
+ self.device_markers.append(device)
+ self.resolution_markers.append(is_res_change)
+
+ # Update counters
+ if is_spike:
+ self.spike_count += 1
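+        # Count a device switch when this sample's device differs from the previous sample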
+ if len(self.device_markers) > 1 and device != list(self.device_markers)[-2]:
+ self.device_switches += 1
+ if is_res_change:
+ self.resolution_changes += 1
+
+ # Auto-scale Y axis with better algorithm
+ if self.auto_scale and self.y_data:
+ data_max = max(self.y_data)
+ data_min = min(self.y_data)
+ if data_max > data_min:
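+                # Keep ~15% of the data range as headroom above the max and half that below, never going below zero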
+ padding = (data_max - data_min) * 0.15
+ self.y_max = data_max + padding
+ self.y_min = max(0, data_min - padding * 0.5)
+ else:
+ self.y_max = data_max + 10 if data_max > 0 else 100
+ self.y_min = 0
+ self.update()
+
+ def clear_data(self):
+ """Clear the graph data"""
+ self.x_data.clear()
+ self.y_data.clear()
+ self.spike_markers.clear()
+ self.device_markers.clear()
+ self.resolution_markers.clear()
+ self.spike_count = 0
+ self.device_switches = 0
+ self.resolution_changes = 0
+ self.update()
def add_data_point(self, x: float, y: float, is_spike: bool = False, device: str = "CPU", is_res_change: bool = False):
"""Add a new data point to the graph"""
@@ -71,133 +135,479 @@ class RealTimeGraph(QWidget):
self.update()
def paintEvent(self, event):
- """Override paint event to draw the graph"""
+ """Override paint event to draw the graph with enhanced styling"""
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
width = self.width()
height = self.height()
- graph_width = width - 2 * self.margin
- graph_height = height - 2 * self.margin
+ margin = 50
+ graph_width = width - 2 * margin
+ graph_height = height - 2 * margin
- # Background
- painter.fillRect(self.rect(), QColor(30, 30, 30))
+ # Enhanced background with subtle gradient
+ gradient = QLinearGradient(0, 0, 0, height)
+ gradient.setColorAt(0, QColor(25, 25, 30))
+ gradient.setColorAt(1, QColor(15, 15, 20))
+ painter.fillRect(self.rect(), QBrush(gradient))
- # Title
- painter.setPen(QColor(255, 255, 255))
- painter.setFont(QFont("Arial", 12, QFont.Bold))
- painter.drawText(10, 20, self.title)
+        # Title drawn in the accent color
+        painter.setPen(self.accent_color)
+        painter.setFont(QFont("Segoe UI", 13, QFont.Bold))
+        painter.drawText(15, 25, self.title)
- # Axes
- painter.setPen(QPen(QColor(200, 200, 200), 2))
- painter.drawLine(self.margin, self.margin, self.margin, height - self.margin)
- painter.drawLine(self.margin, height - self.margin, width - self.margin, height - self.margin)
+ # Enhanced axes with better styling
+ painter.setPen(QPen(self.text_color, 2))
+ painter.drawLine(margin, margin, margin, height - margin) # Y-axis
+ painter.drawLine(margin, height - margin, width - margin, height - margin) # X-axis
- # Grid
- painter.setPen(QPen(self.grid_color, 1))
- for i in range(5):
- y = self.margin + (graph_height * i / 4)
- painter.drawLine(self.margin, y, width - self.margin, y)
- for i in range(10):
- x = self.margin + (graph_width * i / 9)
- painter.drawLine(x, self.margin, x, height - self.margin)
+ # Enhanced grid with subtle styling
+ painter.setPen(QPen(self.grid_color, 1, Qt.DotLine))
+ # Horizontal grid lines
+ for i in range(1, 5):
+ y = margin + (graph_height * i / 4)
+ painter.drawLine(margin + 5, y, width - margin - 5, y)
+ # Vertical grid lines
+ for i in range(1, 10):
+ x = margin + (graph_width * i / 9)
+ painter.drawLine(x, margin + 5, x, height - margin - 5)
- # Y-axis labels
- painter.setPen(QColor(200, 200, 200))
- painter.setFont(QFont("Arial", 8))
+ # Enhanced Y-axis labels with better formatting
+ painter.setPen(self.text_color)
+ painter.setFont(QFont("Segoe UI", 9))
for i in range(5):
y_val = self.y_min + (self.y_max - self.y_min) * (4 - i) / 4
- y_pos = self.margin + (graph_height * i / 4)
- painter.drawText(5, y_pos + 5, f"{y_val:.1f}")
+ y_pos = margin + (graph_height * i / 4)
+ if y_val >= 1000:
+ label = f"{y_val/1000:.1f}k"
+ elif y_val >= 1:
+ label = f"{y_val:.1f}"
+ else:
+ label = f"{y_val:.2f}"
+ painter.drawText(5, y_pos + 4, label)
- # X-axis label
+ # Enhanced Y-axis label with rotation
painter.save()
- painter.translate(15, height // 2)
+ painter.setPen(self.text_color)
+ painter.setFont(QFont("Segoe UI", 10))
+ painter.translate(20, height // 2)
painter.rotate(-90)
- painter.drawText(-len(self.y_label) * 3, 0, self.y_label)
+ painter.drawText(-len(self.y_label) * 4, 0, self.y_label)
painter.restore()
- # Data points
+ # Enhanced data visualization
if len(self.x_data) >= 2 and len(self.y_data) >= 2:
points = []
spike_points = []
device_changes = []
res_changes = []
+
x_min = min(self.x_data) if self.x_data else 0
x_max = max(self.x_data) if self.x_data else 1
x_range = x_max - x_min if x_max > x_min else 1
+
+ # Prepare point coordinates
for i, (x_val, y_val, is_spike, device, is_res_change) in enumerate(zip(
self.x_data, self.y_data, self.spike_markers, self.device_markers, self.resolution_markers
)):
- x_screen = self.margin + (x_val - x_min) / x_range * graph_width
- y_screen = height - self.margin - (y_val - self.y_min) / (self.y_max - self.y_min) * graph_height
+ x_screen = margin + (x_val - x_min) / x_range * graph_width
+ y_screen = height - margin - (y_val - self.y_min) / (self.y_max - self.y_min) * graph_height
points.append((x_screen, y_screen))
+
if is_spike:
spike_points.append((x_screen, y_screen))
if i > 0 and device != list(self.device_markers)[i-1]:
device_changes.append((x_screen, y_screen, device))
if is_res_change:
res_changes.append((x_screen, y_screen))
+
+ # Draw main line with enhanced styling
if len(points) >= 2:
- painter.setPen(QPen(self.line_color, 2))
+ painter.setPen(QPen(self.line_color, 3))
for i in range(len(points) - 1):
x1, y1 = points[i]
x2, y2 = points[i + 1]
painter.drawLine(x1, y1, x2, y2)
- painter.setPen(QPen(self.spike_color, 3))
+
+ # Add subtle glow effect to the line
+ painter.setPen(QPen(QColor(self.line_color.red(), self.line_color.green(), self.line_color.blue(), 60), 6))
+ for i in range(len(points) - 1):
+ x1, y1 = points[i]
+ x2, y2 = points[i + 1]
+ painter.drawLine(x1, y1, x2, y2)
+
+ # Enhanced spike markers
+ painter.setPen(QPen(self.spike_color, 2))
painter.setBrush(QBrush(self.spike_color))
for x, y in spike_points:
- painter.drawEllipse(x - 3, y - 3, 6, 6)
+ painter.drawEllipse(x - 4, y - 4, 8, 8)
+ # Add spike indicator line
+ painter.drawLine(x, y - 10, x, y + 10)
+
+ # Enhanced device change indicators
for x, y, device in device_changes:
color = self.gpu_color if device == "GPU" else self.cpu_color
- painter.setPen(QPen(color, 2))
- painter.setBrush(QBrush(color))
- painter.drawRect(x - 2, self.margin, 4, graph_height)
+ painter.setPen(QPen(color, 3))
+ painter.setBrush(QBrush(QColor(color.red(), color.green(), color.blue(), 100)))
+ painter.drawRect(x - 3, margin, 6, graph_height)
+
+ # Add device label
+ painter.setPen(color)
+ painter.setFont(QFont("Segoe UI", 8, QFont.Bold))
+ painter.drawText(x - 10, margin - 5, device)
+
+ # Enhanced resolution change indicators
for x, y in res_changes:
- painter.setPen(QPen(QColor(255, 167, 38), 2)) # Orange for resolution change
- painter.drawLine(x, self.margin, x, height - self.margin)
+ painter.setPen(QPen(QColor(255, 193, 7), 2)) # Amber color
+ painter.drawLine(x, margin, x, height - margin)
+
+ # Add resolution change marker
+ painter.setBrush(QBrush(QColor(255, 193, 7)))
+ painter.drawEllipse(x - 3, margin - 5, 6, 6)
class PerformanceGraphsWidget(QWidget):
+ """Enhanced performance graphs widget with real-time data visualization"""
+
+ # Define signals for better integration
+ performance_data_updated = Signal(dict)
+ spike_detected = Signal(dict)
+ device_switched = Signal(str)
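+    # Payloads as emitted below: performance_data_updated carries the full analytics dict,
+    # spike_detected carries {'latency', 'timestamp', 'device'}, and device_switched the new device name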
+
def __init__(self):
super().__init__()
self.setup_ui()
+
+ # Enhanced timer setup
self.update_timer = QTimer()
self.update_timer.timeout.connect(self.update_graphs)
+ self.system_timer = QTimer()
+ self.system_timer.timeout.connect(self.update_system_metrics)
+
try:
- self.update_timer.start(1000)
+ self.update_timer.start(500) # Update graphs every 500ms for smoother animation
+ self.system_timer.start(1000) # Update system metrics every second
except Exception as e:
- print(f"❌ Error starting performance graph timer: {e}")
- self.start_time = None
+ print(f"❌ Error starting performance graph timers: {e}")
+
+ # Enhanced data tracking
+        self.start_time = time.time()
self.latest_data = {}
self.cpu_usage_history = deque(maxlen=300)
+        self.ram_usage_history = deque(maxlen=300)  # RAM usage history (mirrors cpu_usage_history)
+ self.frame_counter = 0
+ self.spike_threshold = 100.0 # Default spike threshold in ms
+ self.previous_device = "CPU" # Track device changes
+
+ # Performance statistics
+ self.latency_stats = {
+ 'avg': 0.0,
+ 'max': 0.0,
+ 'min': float('inf'),
+ 'spike_count': 0
+ }
+
+ def __del__(self):
+ """Clean up timers when widget is destroyed"""
+ try:
+ if hasattr(self, 'system_timer') and self.system_timer:
+ self.system_timer.stop()
+ self.system_timer.deleteLater()
+ if hasattr(self, 'update_timer') and self.update_timer:
+ self.update_timer.stop()
+ self.update_timer.deleteLater()
+        except Exception:
+ pass
+
+ def closeEvent(self, event):
+ """Handle widget close event"""
+ try:
+ if hasattr(self, 'system_timer') and self.system_timer:
+ self.system_timer.stop()
+ if hasattr(self, 'update_timer') and self.update_timer:
+ self.update_timer.stop()
+        except Exception:
+ pass
+ super().closeEvent(event)
self.ram_usage_history = deque(maxlen=300)
+
+ self.setStyleSheet("""
+ QWidget {
+ background-color: #121212;
+ color: #ffffff;
+ }
+ QLabel {
+ color: #ffffff;
+ background: transparent;
+ }
+ QFrame {
+ background-color: #1a1a1a;
+ border: 1px solid #333333;
+ border-radius: 8px;
+ margin: 2px;
+ }
+ """)
+
def setup_ui(self):
- layout = QVBoxLayout(self)
+ # Create main layout
+ main_layout = QVBoxLayout(self)
+ main_layout.setContentsMargins(5, 5, 5, 5)
+ main_layout.setSpacing(0)
+
+ # Create scroll area
+ scroll_area = QScrollArea()
+ scroll_area.setWidgetResizable(True)
+ scroll_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
+ scroll_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
+ scroll_area.setStyleSheet("""
+ QScrollArea {
+ border: none;
+ background-color: #121212;
+ }
+ QScrollBar:vertical {
+ background-color: #2C2C2C;
+ width: 12px;
+ border-radius: 6px;
+ }
+ QScrollBar::handle:vertical {
+ background-color: #555555;
+ border-radius: 6px;
+ min-height: 20px;
+ }
+ QScrollBar::handle:vertical:hover {
+ background-color: #777777;
+ }
+ QScrollBar:horizontal {
+ background-color: #2C2C2C;
+ height: 12px;
+ border-radius: 6px;
+ }
+ QScrollBar::handle:horizontal {
+ background-color: #555555;
+ border-radius: 6px;
+ min-width: 20px;
+ }
+ QScrollBar::handle:horizontal:hover {
+ background-color: #777777;
+ }
+ """)
+
+ # Create scrollable content widget
+ content_widget = QWidget()
+ content_layout = QVBoxLayout(content_widget)
+ content_layout.setContentsMargins(10, 10, 10, 10)
+ content_layout.setSpacing(8)
+
+ # Enhanced title section
+ title_frame = QFrame()
+ title_layout = QVBoxLayout(title_frame)
+
title_label = QLabel("🔥 Real-Time Inference Performance & Latency Spike Analysis")
- title_label.setStyleSheet("font-size: 16px; font-weight: bold; color: #FFD700; margin: 10px;")
- layout.addWidget(title_label)
+        title_label.setStyleSheet("""
+            font-size: 18px;
+            font-weight: bold;
+            color: #FFD700;
+            margin: 8px;
+        """)
+ title_layout.addWidget(title_label)
+
+ # Enhanced system stats
+ stats_layout = QHBoxLayout()
+
self.cpu_ram_stats = QLabel("CPU: 0% | RAM: 0%")
- self.cpu_ram_stats.setStyleSheet("color: #FFD700; font-weight: bold; font-size: 14px; margin: 8px;")
- layout.addWidget(self.cpu_ram_stats)
+ self.cpu_ram_stats.setStyleSheet("""
+ color: #00FFFF;
+ font-weight: bold;
+ font-size: 14px;
+ margin: 4px 8px;
+ padding: 4px 8px;
+ background-color: rgba(0, 255, 255, 0.1);
+ border-radius: 4px;
+ """)
+ stats_layout.addWidget(self.cpu_ram_stats)
+
+ # Add current model display
+ self.current_model_stats = QLabel("Model: Loading...")
+ self.current_model_stats.setStyleSheet("""
+ color: #FFD700;
+ font-weight: bold;
+ font-size: 14px;
+ margin: 4px 8px;
+ padding: 4px 8px;
+ background-color: rgba(255, 215, 0, 0.1);
+ border-radius: 4px;
+ """)
+ stats_layout.addWidget(self.current_model_stats)
+
+ title_layout.addLayout(stats_layout)
+
+ title_frame.setLayout(title_layout)
+ content_layout.addWidget(title_frame)
+
+ # Enhanced splitter for graphs - set minimum sizes to avoid cluttering
splitter = QSplitter(Qt.Vertical)
- # Latency graph
+ splitter.setStyleSheet("""
+ QSplitter::handle {
+ background-color: #333333;
+ height: 3px;
+ }
+ QSplitter::handle:hover {
+ background-color: #555555;
+ }
+ """)
+
+ # Enhanced Latency graph
latency_frame = QFrame()
+ latency_frame.setMinimumHeight(250) # Set minimum height to prevent cluttering
+ latency_frame.setStyleSheet("""
+ QFrame {
+ background: qlineargradient(x1:0, y1:0, x2:0, y2:1,
+ stop:0 rgba(30, 30, 35, 255),
+ stop:1 rgba(20, 20, 25, 255));
+ border: 2px solid #00FFFF;
+ border-radius: 10px;
+ }
+ """)
latency_layout = QVBoxLayout(latency_frame)
+
self.latency_graph = RealTimeGraph(
"Inference Latency Over Time",
"Latency (ms)",
max_points=300
)
+ self.latency_graph.setMinimumHeight(200) # Ensure minimum display height
latency_layout.addWidget(self.latency_graph)
+
latency_info = QHBoxLayout()
- self.latency_stats = QLabel("Avg: 0ms | Max: 0ms | Spikes: 0")
- self.latency_stats.setStyleSheet("color: #00FFFF; font-weight: bold;")
- latency_info.addWidget(self.latency_stats)
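+        # Renamed to latency_stats_label to avoid clashing with the latency_stats dict created in __init__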
+ self.latency_stats_label = QLabel("Avg: 0ms | Max: 0ms | Spikes: 0")
+ self.latency_stats_label.setStyleSheet("""
+ color: #00FFFF;
+ font-weight: bold;
+ font-size: 12px;
+ padding: 4px 8px;
+ background-color: rgba(0, 255, 255, 0.15);
+ border-radius: 4px;
+ margin: 4px;
+ """)
+ latency_info.addWidget(self.latency_stats_label)
latency_info.addStretch()
latency_layout.addLayout(latency_info)
+
latency_frame.setLayout(latency_layout)
splitter.addWidget(latency_frame)
- # FPS graph
+
+ # Enhanced FPS graph
fps_frame = QFrame()
+ fps_frame.setMinimumHeight(250) # Set minimum height to prevent cluttering
+ fps_frame.setStyleSheet("""
+ QFrame {
+ background: qlineargradient(x1:0, y1:0, x2:0, y2:1,
+ stop:0 rgba(30, 35, 30, 255),
+ stop:1 rgba(20, 25, 20, 255));
+ border: 2px solid #00FF00;
+ border-radius: 10px;
+ }
+ """)
+ fps_layout = QVBoxLayout(fps_frame)
+
+ self.fps_graph = RealTimeGraph(
+ "FPS & Resolution Impact",
+ "FPS",
+ max_points=300
+ )
+ self.fps_graph.setMinimumHeight(200) # Ensure minimum display height
+ fps_layout.addWidget(self.fps_graph)
+
+ fps_info = QHBoxLayout()
+ self.fps_stats = QLabel("Current FPS: 0 | Resolution: - | Device: -")
+ self.fps_stats.setStyleSheet("""
+ color: #00FF00;
+ font-weight: bold;
+ font-size: 12px;
+ padding: 4px 8px;
+ background-color: rgba(0, 255, 0, 0.15);
+ border-radius: 4px;
+ margin: 4px;
+ """)
+ fps_info.addWidget(self.fps_stats)
+ fps_info.addStretch()
+ fps_layout.addLayout(fps_info)
+
+ fps_frame.setLayout(fps_layout)
+ splitter.addWidget(fps_frame)
+
+ # Enhanced Device switching & resolution changes graph
+ device_frame = QFrame()
+ device_frame.setMinimumHeight(220) # Set minimum height to prevent cluttering
+ device_frame.setStyleSheet("""
+ QFrame {
+ background: qlineargradient(x1:0, y1:0, x2:0, y2:1,
+ stop:0 rgba(35, 30, 30, 255),
+ stop:1 rgba(25, 20, 20, 255));
+ border: 2px solid #FFB300;
+ border-radius: 10px;
+ }
+ """)
+ device_layout = QVBoxLayout(device_frame)
+
+ self.device_graph = RealTimeGraph(
+ "Device Switching & Resolution Changes",
+ "Events",
+ max_points=300
+ )
+ self.device_graph.setMinimumHeight(170) # Ensure minimum display height
+ device_layout.addWidget(self.device_graph)
+
+ self.device_legend = QLabel(
+ "● CPU Spikes: 0 | "
+ "● GPU Spikes: 0 | "
+ "● Switches: 0 | "
+ "● Res Changes: 0"
+ )
+ self.device_legend.setStyleSheet("""
+ color: #FFB300;
+ font-size: 12px;
+ font-weight: bold;
+ margin: 4px 8px;
+ padding: 4px 8px;
+ background-color: rgba(255, 179, 0, 0.15);
+ border-radius: 4px;
+ """)
+ device_layout.addWidget(self.device_legend)
+
+ device_frame.setLayout(device_layout)
+ splitter.addWidget(device_frame)
+
+ # Set splitter proportions with minimum space for each section
+ splitter.setSizes([300, 300, 250]) # Increased minimum sizes
+ splitter.setChildrenCollapsible(False) # Prevent collapsing sections
+
+ content_layout.addWidget(splitter)
+ content_widget.setLayout(content_layout)
+
+ # Set minimum size for content widget to ensure scrolling when needed
+ content_widget.setMinimumSize(400, 850) # Minimum width and height
+
+ # Add content widget to scroll area
+ scroll_area.setWidget(content_widget)
+
+ # Add scroll area to main layout
+ main_layout.addWidget(scroll_area)
+ self.setLayout(main_layout)
fps_layout = QVBoxLayout(fps_frame)
self.fps_graph = RealTimeGraph(
"FPS & Resolution Impact",
@@ -222,33 +632,267 @@ class PerformanceGraphsWidget(QWidget):
max_points=300
)
device_layout.addWidget(self.device_graph)
- self.device_legend = QLabel("CPU Spikes: 0 | GPU Spikes: 0 | Switches: 0 | Res Changes: 0")
- self.device_legend.setStyleSheet("color: #ffb300; font-size: 13px; font-weight: bold; margin: 2px 0 0 8px;")
- device_layout.addWidget(self.device_legend)
- device_frame.setLayout(device_layout)
- splitter.addWidget(device_frame)
- layout.addWidget(splitter)
- self.setLayout(layout)
+
+ @Slot()
+ def update_system_metrics(self):
+ """Update system CPU and RAM usage"""
+ try:
+ # Check if the widget is still valid and not being destroyed
+            if not self.isVisible():
+ return
+
+ # Check if widgets still exist before updating
+ if not hasattr(self, 'cpu_ram_stats') or not self.cpu_ram_stats:
+ return
+ if not hasattr(self, 'device_graph') or not self.device_graph:
+ return
+
+ # Check if the RealTimeGraph objects are still valid
+ try:
+ if hasattr(self.device_graph, 'add_data_point'):
+ # Test if the object is still valid by accessing a simple property
+ _ = self.device_graph.objectName()
+ else:
+ return
+ except RuntimeError:
+ # Object has been deleted
+ return
+
+ if PSUTIL_AVAILABLE:
+ cpu_percent = psutil.cpu_percent(interval=None)
+ memory = psutil.virtual_memory()
+ ram_percent = memory.percent
+ else:
+ # Fallback values when psutil is not available
+ cpu_percent = 0.0
+ ram_percent = 0.0
+
+ if hasattr(self, 'cpu_usage_history'):
+ self.cpu_usage_history.append(cpu_percent)
+ if hasattr(self, 'ram_usage_history'):
+ self.ram_usage_history.append(ram_percent)
+
+ # Update display
+ try:
+ if PSUTIL_AVAILABLE:
+ self.cpu_ram_stats.setText(f"CPU: {cpu_percent:.1f}% | RAM: {ram_percent:.1f}%")
+ else:
+ self.cpu_ram_stats.setText("CPU: -- | RAM: -- (monitoring unavailable)")
+ except RuntimeError:
+ # Widget has been deleted
+ return
+
+ # Add CPU usage to device graph as background metric
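+            # (plotted on the same "Events" axis as the device-usage values, so the Y scale mixes units)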
+ try:
+ current_time = time.time() - self.start_time if self.start_time else 0
+ self.device_graph.add_data_point(current_time, cpu_percent, device="System")
+ except RuntimeError:
+ # Graph has been deleted
+ return
+
+ except Exception as e:
+ print(f"❌ Error updating system metrics: {e}")
+ # Fallback in case of any error
+ try:
+ if hasattr(self, 'cpu_ram_stats') and self.cpu_ram_stats:
+ self.cpu_ram_stats.setText("CPU: -- | RAM: -- (error)")
+            except Exception:
+ pass
+
+ @Slot()
def update_graphs(self):
- # Placeholder for updating graphs with new data
- pass
+ """Update graphs with latest data"""
+ if not self.latest_data:
+ return
+
+ try:
+ chart_data = self.latest_data.get('chart_data', {})
+ latency_stats = self.latest_data.get('latency_stats', {})
+ current_metrics = self.latest_data.get('current_metrics', {})
+
+ if not chart_data.get('timestamps'):
+ return
+
+ # Get the latest data point
+ timestamps = chart_data.get('timestamps', [])
+ if not timestamps:
+ return
+
+ latest_timestamp = timestamps[-1]
+ current_time = time.time() - self.start_time if self.start_time else latest_timestamp
+
+ # Update latency graph
+ if 'inference_latency' in chart_data:
+ latency_values = chart_data['inference_latency']
+ if latency_values:
+ latest_latency = latency_values[-1]
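+                    # A spike is any latency above spike_threshold (ms); adjustable via set_spike_threshold()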
+ is_spike = latest_latency > self.spike_threshold
+ device = current_metrics.get('device', 'CPU')
+
+ self.latency_graph.add_data_point(
+ current_time,
+ latest_latency,
+ is_spike=is_spike,
+ device=device
+ )
+
+ # Update latency statistics
+ self.latency_stats['max'] = max(self.latency_stats['max'], latest_latency)
+ self.latency_stats['min'] = min(self.latency_stats['min'], latest_latency)
+ if is_spike:
+ self.latency_stats['spike_count'] += 1
+ # Emit spike signal
+ self.spike_detected.emit({
+ 'latency': latest_latency,
+ 'timestamp': current_time,
+ 'device': device
+ })
+
+ # Calculate running average
+ if hasattr(self.latency_graph, 'y_data') and self.latency_graph.y_data:
+ self.latency_stats['avg'] = sum(self.latency_graph.y_data) / len(self.latency_graph.y_data)
+
+ # Update FPS graph
+ if 'fps' in chart_data:
+ fps_values = chart_data['fps']
+ if fps_values:
+ latest_fps = fps_values[-1]
+ device = current_metrics.get('device', 'CPU')
+ resolution = current_metrics.get('resolution', 'Unknown')
+
+ # Check for device switch
+ device_switched = device != self.previous_device
+ if device_switched:
+ self.device_switched.emit(device)
+ self.previous_device = device
+
+ self.fps_graph.add_data_point(
+ current_time,
+ latest_fps,
+ device=device,
+ is_res_change=False # Will be set by resolution change detection
+ )
+
+ # Update FPS stats display with model name
+ model_name = current_metrics.get('model', 'Unknown')
+ self.fps_stats.setText(f"Current FPS: {latest_fps:.1f} | Resolution: {resolution} | Device: {device} | Model: {model_name}")
+
+ # Update device switching graph
+ device_usage = chart_data.get('device_usage', [])
+ if device_usage:
+ latest_usage = device_usage[-1]
+ device = current_metrics.get('device', 'CPU')
+
+ self.device_graph.add_data_point(
+ current_time,
+ latest_usage * 100, # Convert to percentage
+ device=device
+ )
+
+ # Update statistics displays
+ self.latency_stats_label.setText(
+ f"Avg: {self.latency_stats['avg']:.1f}ms | "
+ f"Max: {self.latency_stats['max']:.1f}ms | "
+ f"Spikes: {self.latency_stats['spike_count']}"
+ )
+
+ # Update device legend
+ self.device_legend.setText(
+ f"● CPU Spikes: {self.latency_graph.spike_count} | "
+ f"● GPU Spikes: {self.device_graph.spike_count} | "
+ f"● Switches: {self.device_graph.device_switches} | "
+ f"● Res Changes: {self.device_graph.resolution_changes}"
+ )
+
+ # Update current model display
+ model_name = current_metrics.get('model', 'Unknown')
+ device = current_metrics.get('device', 'Unknown')
+ if hasattr(self, 'current_model_stats'):
+ self.current_model_stats.setText(f"Model: {model_name} | Device: {device}")
+
+ self.frame_counter += 1
+
+ except Exception as e:
+ print(f"❌ Error updating performance graphs: {e}")
+
def update_performance_data(self, analytics_data: Dict[str, Any]):
"""Update graphs with new analytics data, including system metrics"""
try:
print(f"[PERF DEBUG] update_performance_data called with: {analytics_data}")
+
+ # Initialize start time if not set
+ if self.start_time is None:
+ self.start_time = time.time()
+
chart_data = analytics_data.get('real_time_data', {})
latency_stats = analytics_data.get('latency_statistics', {})
current_metrics = analytics_data.get('current_metrics', {})
system_metrics = analytics_data.get('system_metrics', {})
+
if not chart_data.get('timestamps'):
print("[PERF DEBUG] No timestamps in chart_data")
return
+
self.latest_data = {
'chart_data': chart_data,
'latency_stats': latency_stats,
'current_metrics': current_metrics,
'system_metrics': system_metrics
}
- self.update_graphs() # Immediately update graphs on new data
+
+ # Emit signal for other components
+ self.performance_data_updated.emit(analytics_data)
+
+ # Immediately update graphs on new data
+ self.update_graphs()
+
except Exception as e:
print(f"❌ Error updating performance data: {e}")
+
+ def clear_all_graphs(self):
+ """Clear all graph data"""
+ try:
+ self.latency_graph.clear_data()
+ self.fps_graph.clear_data()
+ self.device_graph.clear_data()
+
+ # Reset statistics
+ self.latency_stats = {
+ 'avg': 0.0,
+ 'max': 0.0,
+ 'min': float('inf'),
+ 'spike_count': 0
+ }
+
+ self.frame_counter = 0
+ self.start_time = time.time()
+
+ # Update displays
+ self.latency_stats_label.setText("Avg: 0ms | Max: 0ms | Spikes: 0")
+ self.fps_stats.setText("Current FPS: 0 | Resolution: - | Device: -")
+ self.device_legend.setText(
+ "● CPU Spikes: 0 | "
+ "● GPU Spikes: 0 | "
+ "● Switches: 0 | "
+ "● Res Changes: 0"
+ )
+
+ except Exception as e:
+ print(f"❌ Error clearing graphs: {e}")
+
+ def set_spike_threshold(self, threshold: float):
+ """Set the threshold for detecting latency spikes"""
+ self.spike_threshold = threshold
+
+ def get_performance_summary(self) -> Dict[str, Any]:
+ """Get a summary of current performance metrics"""
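+        # Histories are copied into plain lists so callers receive a snapshot, not the live deques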
+ return {
+ 'latency_stats': self.latency_stats.copy(),
+ 'frame_count': self.frame_counter,
+ 'cpu_usage': list(self.cpu_usage_history),
+ 'ram_usage': list(self.ram_usage_history),
+ 'current_device': self.previous_device
+ }
diff --git a/qt_app_pyside1/ui/video_detection_tab.py b/qt_app_pyside1/ui/video_detection_tab.py
index 58ec501..ca0f642 100644
--- a/qt_app_pyside1/ui/video_detection_tab.py
+++ b/qt_app_pyside1/ui/video_detection_tab.py
@@ -1,36 +1,502 @@
-from PySide6.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QSlider, QCheckBox, QFileDialog, QSizePolicy, QGridLayout, QFrame, QSpacerItem
+from PySide6.QtWidgets import (
+ QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QSlider, QCheckBox,
+ QFileDialog, QSizePolicy, QFrame, QTabWidget, QGridLayout, QComboBox,
+ QListWidget, QListWidgetItem, QGroupBox, QScrollArea
+)
from PySide6.QtCore import Signal, Qt
-from PySide6.QtGui import QPixmap, QIcon, QFont
+from PySide6.QtGui import QPixmap, QIcon
+import json
+import os
+from pathlib import Path
-class DiagnosticOverlay(QFrame):
- """Semi-transparent overlay for diagnostics."""
+class SmartIntersectionOverlay(QFrame):
+ """Advanced overlay for Smart Intersection analytics."""
def __init__(self, parent=None):
super().__init__(parent)
self.setStyleSheet("""
- background: rgba(0,0,0,0.5);
- border-radius: 8px;
+ background: rgba(0,20,40,0.85);
+ border: 2px solid #03DAC5;
+ border-radius: 12px;
color: #fff;
font-family: 'Consolas', 'SF Mono', 'monospace';
- font-size: 13px;
+ font-size: 12px;
""")
- # self.setFixedWidth(260) # Remove fixed width
- self.setFixedHeight(90)
- self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) # Allow horizontal stretch
+ self.setFixedHeight(140)
+ self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
self.setAttribute(Qt.WA_TransparentForMouseEvents)
+
layout = QVBoxLayout(self)
- layout.setContentsMargins(12, 8, 12, 8)
- self.model_label = QLabel("Model: -")
- self.device_label = QLabel("Device: -")
- self.stats_label = QLabel("Cars: 0 | Trucks: 0 | Ped: 0 | TLights: 0 | Moto: 0")
- for w in [self.model_label, self.device_label, self.stats_label]:
+ layout.setContentsMargins(16, 12, 16, 12)
+ layout.setSpacing(4)
+
+ # Title
+ title = QLabel("🚦 Smart Intersection Analytics")
+ title.setStyleSheet("color: #03DAC5; font-weight: bold; font-size: 14px;")
+ layout.addWidget(title)
+
+ # Scene data
+ self.scene_label = QLabel("Scene: Multi-Camera Fusion")
+ self.tracking_label = QLabel("Active Tracks: 0")
+ self.roi_label = QLabel("ROI Events: 0")
+
+ # Camera data
+ self.camera_label = QLabel("Cameras: North(0) East(0) South(0) West(0)")
+
+ # Analytics data
+ self.analytics_label = QLabel("Analytics: Crosswalk(0) Lane(0) Safety(0)")
+
+ for w in [self.scene_label, self.tracking_label, self.roi_label,
+ self.camera_label, self.analytics_label]:
w.setStyleSheet("color: #fff;")
layout.addWidget(w)
- layout.addStretch(1)
+
+ def update_smart_intersection(self, scene_data):
+ """Update smart intersection specific data"""
+ if not scene_data:
+ return
+
+ # Update tracking info
+ active_tracks = scene_data.get('active_tracks', 0)
+ self.tracking_label.setText(f"Active Tracks: {active_tracks}")
+
+ # Update ROI events
+ roi_events = scene_data.get('roi_events', 0)
+ self.roi_label.setText(f"ROI Events: {roi_events}")
+
+ # Update camera data
+ cameras = scene_data.get('cameras', {})
+ north = cameras.get('north', 0)
+ east = cameras.get('east', 0)
+ south = cameras.get('south', 0)
+ west = cameras.get('west', 0)
+ self.camera_label.setText(f"Cameras: North({north}) East({east}) South({south}) West({west})")
+
+ # Update analytics
+ analytics = scene_data.get('analytics', {})
+ crosswalk = analytics.get('crosswalk_events', 0)
+ lane = analytics.get('lane_events', 0)
+ safety = analytics.get('safety_events', 0)
+ self.analytics_label.setText(f"Analytics: Crosswalk({crosswalk}) Lane({lane}) Safety({safety})")
+
+
+class IntersectionROIWidget(QFrame):
+ """Widget for defining and managing ROI regions for smart intersection"""
+ roi_updated = Signal(dict)
+
+ def __init__(self, parent=None):
+ super().__init__(parent)
+ self.setStyleSheet("""
+ QFrame {
+ background: #1a1a1a;
+ border: 1px solid #424242;
+ border-radius: 8px;
+ }
+ """)
+ self.setFixedWidth(300)
+
+ layout = QVBoxLayout(self)
+ layout.setContentsMargins(16, 16, 16, 16)
+
+ # Title
+ title = QLabel("🎯 Region of Interest (ROI)")
+ title.setStyleSheet("color: #03DAC5; font-weight: bold; font-size: 14px;")
+ layout.addWidget(title)
+
+ # ROI Type selection
+ type_layout = QHBoxLayout()
+ type_layout.addWidget(QLabel("Type:"))
+ self.roi_type = QComboBox()
+ self.roi_type.addItems(["Crosswalk", "Traffic Lane", "Safety Zone", "Intersection Center"])
+ type_layout.addWidget(self.roi_type)
+ layout.addLayout(type_layout)
+
+ # ROI List
+ self.roi_list = QListWidget()
+ self.roi_list.setMaximumHeight(120)
+ layout.addWidget(self.roi_list)
+
+ # ROI Controls
+ roi_controls = QHBoxLayout()
+ self.add_roi_btn = QPushButton("Add ROI")
+ self.delete_roi_btn = QPushButton("Delete")
+ self.add_roi_btn.setStyleSheet("background: #27ae60; color: white; border-radius: 4px; padding: 6px;")
+ self.delete_roi_btn.setStyleSheet("background: #e74c3c; color: white; border-radius: 4px; padding: 6px;")
+ roi_controls.addWidget(self.add_roi_btn)
+ roi_controls.addWidget(self.delete_roi_btn)
+ layout.addLayout(roi_controls)
+
+ # Analytics settings
+ analytics_group = QGroupBox("Analytics Settings")
+ analytics_layout = QVBoxLayout(analytics_group)
+
+ self.enable_tracking = QCheckBox("Multi-Object Tracking")
+ self.enable_speed = QCheckBox("Speed Estimation")
+ self.enable_direction = QCheckBox("Direction Analysis")
+ self.enable_safety = QCheckBox("Safety Monitoring")
+
+ for cb in [self.enable_tracking, self.enable_speed, self.enable_direction, self.enable_safety]:
+ cb.setChecked(True)
+ cb.setStyleSheet("color: white;")
+ analytics_layout.addWidget(cb)
+
+ layout.addWidget(analytics_group)
+
+ # Connect signals
+ self.add_roi_btn.clicked.connect(self._add_roi)
+ self.delete_roi_btn.clicked.connect(self._delete_roi)
+
+ # Initialize with default ROIs
+ self._init_default_rois()
+
+ def _init_default_rois(self):
+ """Initialize with default intersection ROIs"""
+ default_rois = [
+ "North Crosswalk",
+ "South Crosswalk",
+ "East Crosswalk",
+ "West Crosswalk",
+ "Center Intersection",
+ "North Lane",
+ "South Lane",
+ "East Lane",
+ "West Lane"
+ ]
+
+ for roi in default_rois:
+ item = QListWidgetItem(roi)
+ item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
+ item.setCheckState(Qt.Checked)
+ self.roi_list.addItem(item)
+
+ def _add_roi(self):
+ """Add new ROI"""
+ roi_type = self.roi_type.currentText()
+ roi_name = f"{roi_type}_{self.roi_list.count() + 1}"
+
+ item = QListWidgetItem(roi_name)
+ item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
+ item.setCheckState(Qt.Checked)
+ self.roi_list.addItem(item)
+
+ self._emit_roi_update()
+
+ def _delete_roi(self):
+ """Delete selected ROI"""
+ current_row = self.roi_list.currentRow()
+ if current_row >= 0:
+ self.roi_list.takeItem(current_row)
+ self._emit_roi_update()
+
+ def _emit_roi_update(self):
+ """Emit ROI configuration update"""
+ roi_config = {
+ 'rois': [],
+ 'analytics': {
+ 'tracking': self.enable_tracking.isChecked(),
+ 'speed': self.enable_speed.isChecked(),
+ 'direction': self.enable_direction.isChecked(),
+ 'safety': self.enable_safety.isChecked()
+ }
+ }
+
+ for i in range(self.roi_list.count()):
+ item = self.roi_list.item(i)
+ roi_config['rois'].append({
+ 'name': item.text(),
+ 'enabled': item.checkState() == Qt.Checked
+ })
+
+ self.roi_updated.emit(roi_config)
+
+
+class MultiCameraView(QFrame):
+ """Multi-camera view for smart intersection"""
+ def __init__(self, parent=None):
+ super().__init__(parent)
+ self.setStyleSheet("""
+ QFrame {
+ background: #0a0a0a;
+ border: 2px solid #424242;
+ border-radius: 8px;
+ }
+ """)
+
+ layout = QGridLayout(self)
+ layout.setContentsMargins(8, 8, 8, 8)
+ layout.setSpacing(4)
+
+ # Create camera views
+ self.camera_views = {}
+ positions = [('North', 0, 1), ('West', 1, 0), ('East', 1, 2), ('South', 2, 1)]
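+        # 3x3 grid: North top-centre, West/East on the middle row, South bottom-centre;
+        # the centre cell (1, 1) is filled by the intersection overview added below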
+
+ for pos_name, row, col in positions:
+ view = self._create_camera_view(pos_name)
+ self.camera_views[pos_name.lower()] = view
+ layout.addWidget(view, row, col)
+
+ # Center intersection view
+ center_view = self._create_intersection_center()
+ layout.addWidget(center_view, 1, 1)
+
+ def _create_camera_view(self, position):
+ """Create individual camera view"""
+ view = QFrame()
+ view.setStyleSheet("""
+ background: #1a1a1a;
+ border: 1px solid #555;
+ border-radius: 4px;
+ """)
+ view.setMinimumSize(160, 120)
+ view.setMaximumSize(200, 150)
+
+ layout = QVBoxLayout(view)
+ layout.setContentsMargins(4, 4, 4, 4)
+
+ # Title
+ title = QLabel(f"📹 {position}")
+ title.setStyleSheet("color: #03DAC5; font-weight: bold; font-size: 10px;")
+ title.setAlignment(Qt.AlignCenter)
+ layout.addWidget(title)
+
+ # Video area
+ video_area = QLabel("No feed")
+ video_area.setStyleSheet("background: #000; color: #666; border: 1px dashed #333;")
+ video_area.setAlignment(Qt.AlignCenter)
+ video_area.setMinimumHeight(80)
+ layout.addWidget(video_area)
+
+ # Stats
+ stats = QLabel("Objects: 0")
+ stats.setStyleSheet("color: #aaa; font-size: 9px;")
+ stats.setAlignment(Qt.AlignCenter)
+ layout.addWidget(stats)
+
+ return view
+
+ def _create_intersection_center(self):
+ """Create center intersection overview"""
+ view = QFrame()
+ view.setStyleSheet("""
+ background: #2a1a1a;
+ border: 2px solid #03DAC5;
+ border-radius: 8px;
+ """)
+ view.setMinimumSize(160, 120)
+ view.setMaximumSize(200, 150)
+
+ layout = QVBoxLayout(view)
+ layout.setContentsMargins(8, 8, 8, 8)
+
+ title = QLabel("🚦 Intersection")
+ title.setStyleSheet("color: #03DAC5; font-weight: bold; font-size: 12px;")
+ title.setAlignment(Qt.AlignCenter)
+ layout.addWidget(title)
+
+ # Intersection map
+ map_area = QLabel("Scene Map")
+ map_area.setStyleSheet("background: #000; color: #03DAC5; border: 1px solid #03DAC5;")
+ map_area.setAlignment(Qt.AlignCenter)
+ map_area.setMinimumHeight(80)
+ layout.addWidget(map_area)
+
+ # Total stats
+ total_stats = QLabel("Total Objects: 0")
+ total_stats.setStyleSheet("color: #03DAC5; font-size: 10px; font-weight: bold;")
+ total_stats.setAlignment(Qt.AlignCenter)
+ layout.addWidget(total_stats)
+
+ return view
+
+    def update_camera_feed(self, camera_position, pixmap, object_count=0):
+        """Update specific camera feed"""
+        if camera_position.lower() in self.camera_views:
+            view = self.camera_views[camera_position.lower()]
+            # Child labels were created in order: title, video area, stats
+            labels = view.findChildren(QLabel)
+            if len(labels) >= 3:
+                video_label = labels[1]
+                if pixmap:
+                    scaled = pixmap.scaled(video_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
+                    video_label.setPixmap(scaled)
+                # Update stats
+                labels[2].setText(f"Objects: {object_count}")
+
+
+class EnhancedPerformanceOverlay(QFrame):
+ """Enhanced performance metrics overlay with traffic light status."""
+ def __init__(self, parent=None):
+ super().__init__(parent)
+ self.setStyleSheet("""
+ QFrame {
+ background: rgba(20, 30, 40, 0.95);
+ border: 2px solid #03DAC5;
+ border-radius: 12px;
+ color: #fff;
+ font-family: 'Segoe UI', 'Arial', sans-serif;
+ }
+ """)
+ self.setFixedHeight(140)
+ self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
+ self.setAttribute(Qt.WA_TransparentForMouseEvents)
+
+ layout = QVBoxLayout(self)
+ layout.setContentsMargins(16, 12, 16, 12)
+ layout.setSpacing(8)
+
+ # Title row
+ title_layout = QHBoxLayout()
+ title = QLabel("📊 Real-time Performance Metrics")
+ title.setStyleSheet("""
+ color: #03DAC5;
+ font-weight: bold;
+ font-size: 14px;
+ margin-bottom: 4px;
+ """)
+ title_layout.addWidget(title)
+ title_layout.addStretch()
+
+ # Traffic light status
+ self.traffic_light_status = QLabel("🚦 Traffic: Unknown")
+ self.traffic_light_status.setStyleSheet("""
+ color: #FFD700;
+ font-weight: bold;
+ font-size: 13px;
+ background: rgba(0,0,0,0.3);
+ padding: 4px 8px;
+ border-radius: 6px;
+ """)
+ title_layout.addWidget(self.traffic_light_status)
+ layout.addLayout(title_layout)
+
+ # Performance metrics row
+ perf_layout = QHBoxLayout()
+ perf_layout.setSpacing(16)
+
+ # FPS and Inference in badges
+ self.fps_label = QLabel("FPS: --")
+ self.fps_label.setStyleSheet("""
+ background: #27AE60;
+ color: white;
+ font-weight: bold;
+ font-size: 13px;
+ padding: 6px 12px;
+ border-radius: 8px;
+ min-width: 70px;
+ """)
+ self.fps_label.setAlignment(Qt.AlignCenter)
+
+ self.inference_label = QLabel("Inference: -- ms")
+ self.inference_label.setStyleSheet("""
+ background: #3498DB;
+ color: white;
+ font-weight: bold;
+ font-size: 13px;
+ padding: 6px 12px;
+ border-radius: 8px;
+ min-width: 110px;
+ """)
+ self.inference_label.setAlignment(Qt.AlignCenter)
+
+ perf_layout.addWidget(self.fps_label)
+ perf_layout.addWidget(self.inference_label)
+ perf_layout.addStretch()
+ layout.addLayout(perf_layout)
+
+ # System info row
+ system_layout = QHBoxLayout()
+ self.model_label = QLabel("Model: -")
+ self.model_label.setStyleSheet("""
+ color: #E74C3C;
+ font-weight: bold;
+ font-size: 12px;
+ background: rgba(231, 76, 60, 0.1);
+ padding: 4px 8px;
+ border-radius: 6px;
+ """)
+
+ self.device_label = QLabel("Device: -")
+ self.device_label.setStyleSheet("""
+ color: #9B59B6;
+ font-weight: bold;
+ font-size: 12px;
+ background: rgba(155, 89, 182, 0.1);
+ padding: 4px 8px;
+ border-radius: 6px;
+ """)
+
+ system_layout.addWidget(self.model_label)
+ system_layout.addWidget(self.device_label)
+ system_layout.addStretch()
+ layout.addLayout(system_layout)
+
+ # Vehicle counts row
+ self.vehicle_stats_label = QLabel("🚗 Vehicles: 0 | 🚛 Trucks: 0 | 🚶 Pedestrians: 0 | 🏍️ Motorcycles: 0")
+ self.vehicle_stats_label.setStyleSheet("""
+ color: #F39C12;
+ font-weight: bold;
+ font-size: 12px;
+ background: rgba(243, 156, 18, 0.1);
+ padding: 6px 10px;
+ border-radius: 6px;
+ """)
+ layout.addWidget(self.vehicle_stats_label)
def update_overlay(self, model, device, cars, trucks, peds, tlights, motorcycles):
+ """Update performance metrics"""
self.model_label.setText(f"Model: {model}")
self.device_label.setText(f"Device: {device}")
- self.stats_label.setText(f"Cars: {cars} | Trucks: {trucks} | Ped: {peds} | TLights: {tlights} | Moto: {motorcycles}")
+ self.vehicle_stats_label.setText(f"🚗 Vehicles: {cars} | 🚛 Trucks: {trucks} | 🚶 Pedestrians: {peds} | 🏍️ Motorcycles: {motorcycles}")
+
+ def update_performance_metrics(self, fps, inference_time):
+ """Update FPS and inference time"""
+ if fps is not None:
+ self.fps_label.setText(f"FPS: {fps:.1f}")
+ else:
+ self.fps_label.setText("FPS: --")
+
+ if inference_time is not None:
+ self.inference_label.setText(f"Inference: {inference_time:.1f} ms")
+ else:
+ self.inference_label.setText("Inference: -- ms")
+
+ def update_traffic_light_status(self, traffic_light_data):
+ """Update traffic light status"""
+ if traffic_light_data and isinstance(traffic_light_data, dict):
+ color = traffic_light_data.get('color', 'unknown')
+ confidence = traffic_light_data.get('confidence', 0)
+
+ if color.lower() == 'red':
+ icon = "🔴"
+ text_color = "#E74C3C"
+ elif color.lower() == 'yellow':
+ icon = "🟡"
+ text_color = "#F39C12"
+ elif color.lower() == 'green':
+ icon = "🟢"
+ text_color = "#27AE60"
+ else:
+ icon = "⚫"
+ text_color = "#95A5A6"
+
+ self.traffic_light_status.setText(f"{icon} Traffic: {color.title()} ({confidence:.2f})")
+ self.traffic_light_status.setStyleSheet(f"""
+ color: {text_color};
+ font-weight: bold;
+ font-size: 13px;
+ background: rgba(0,0,0,0.3);
+ padding: 4px 8px;
+ border-radius: 6px;
+ """)
+ else:
+ self.traffic_light_status.setText("🚦 Traffic: Unknown")
+ self.traffic_light_status.setStyleSheet("""
+ color: #95A5A6;
+ font-weight: bold;
+ font-size: 13px;
+ background: rgba(0,0,0,0.3);
+ padding: 4px 8px;
+ border-radius: 6px;
+ """)
class VideoDetectionTab(QWidget):
file_selected = Signal(str)
@@ -40,16 +506,202 @@ class VideoDetectionTab(QWidget):
detection_toggled = Signal(bool)
screenshot_clicked = Signal()
seek_changed = Signal(int)
- auto_select_model_device = Signal() # New signal for auto model/device selection
+ auto_select_model_device = Signal()
+
+ # Smart Intersection signals
+ smart_intersection_enabled = Signal(bool)
+ multi_camera_mode_enabled = Signal(bool)
+ roi_configuration_changed = Signal(dict)
+ scene_analytics_toggled = Signal(bool)
def __init__(self):
super().__init__()
self.video_loaded = False
- grid = QGridLayout(self)
- grid.setContentsMargins(32, 24, 32, 24)
- grid.setSpacing(0)
- # File select bar (top)
- file_bar = QHBoxLayout()
+ self.smart_intersection_mode = False
+ self.multi_camera_mode = False
+
+ # Load smart intersection config
+ self.load_smart_intersection_config()
+
+ # Main layout
+ main_layout = QHBoxLayout(self)
+ main_layout.setContentsMargins(16, 16, 16, 16)
+ main_layout.setSpacing(16)
+
+ # Left panel - video and controls
+ left_panel = self._create_left_panel()
+ main_layout.addWidget(left_panel, 3) # 3/4 of the space
+
+ # Right panel - smart intersection controls
+ right_panel = self._create_right_panel()
+ main_layout.addWidget(right_panel, 1) # 1/4 of the space
+
+ def load_smart_intersection_config(self):
+ """Load smart intersection configuration"""
+ config_path = Path(__file__).parent.parent / "config" / "smart-intersection" / "desktop-config.json"
+ try:
+ if config_path.exists():
+ with open(config_path, 'r') as f:
+ self.smart_config = json.load(f)
+ else:
+ self.smart_config = self._get_default_config()
+ except Exception as e:
+ print(f"Error loading smart intersection config: {e}")
+ self.smart_config = self._get_default_config()
+
+ def _get_default_config(self):
+ """Get default smart intersection configuration"""
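+        # Fallback used when config/smart-intersection/desktop-config.json is missing or unreadable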
+ return {
+ "desktop_app_config": {
+ "scene_analytics": {
+ "enable_multi_camera": True,
+ "enable_roi_analytics": True,
+ "enable_vlm_integration": True
+ },
+ "camera_settings": {
+ "max_cameras": 4,
+ "default_fps": 30
+ },
+ "analytics_settings": {
+ "object_tracking": True,
+ "speed_estimation": True,
+ "direction_analysis": True,
+ "safety_monitoring": True
+ }
+ }
+ }
+
+ def _create_left_panel(self):
+ """Create main video panel"""
+ panel = QWidget()
+ layout = QVBoxLayout(panel)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.setSpacing(16)
+
+ # Smart Intersection Mode Toggle
+ mode_bar = self._create_mode_bar()
+ layout.addWidget(mode_bar)
+
+ # File select bar
+ file_bar = self._create_file_bar()
+ layout.addWidget(file_bar)
+
+ # Video display area (with tabs for different modes)
+ self.video_tabs = QTabWidget()
+ self.video_tabs.setStyleSheet("""
+ QTabWidget::pane {
+ border: 1px solid #424242;
+ background: #121212;
+ border-radius: 8px;
+ }
+ QTabBar::tab {
+ background: #232323;
+ color: #fff;
+ padding: 8px 16px;
+ margin-right: 2px;
+ border-top-left-radius: 8px;
+ border-top-right-radius: 8px;
+ }
+ QTabBar::tab:selected {
+ background: #03DAC5;
+ color: #000;
+ }
+ """)
+
+ # Single camera tab
+ self.single_cam_widget = self._create_single_camera_view()
+ self.video_tabs.addTab(self.single_cam_widget, "📹 Single Camera")
+
+ # Multi-camera tab
+ self.multi_cam_widget = MultiCameraView()
+ self.video_tabs.addTab(self.multi_cam_widget, "🚦 Multi-Camera Intersection")
+
+ layout.addWidget(self.video_tabs)
+
+ # Analytics overlay
+ self.analytics_overlay = self._create_analytics_overlay()
+ layout.addWidget(self.analytics_overlay)
+
+ # Control bar
+ control_bar = self._create_control_bar()
+ layout.addWidget(control_bar)
+
+ return panel
+
+ def _create_mode_bar(self):
+ """Create smart intersection mode toggle bar"""
+ bar = QFrame()
+ bar.setStyleSheet("""
+ QFrame {
+ background: #1a2332;
+ border: 2px solid #03DAC5;
+ border-radius: 12px;
+ padding: 8px;
+ }
+ """)
+ bar.setFixedHeight(60)
+
+ layout = QHBoxLayout(bar)
+ layout.setContentsMargins(16, 8, 16, 8)
+
+ # Smart Intersection Toggle
+ self.smart_intersection_toggle = QCheckBox("🚦 Smart Intersection Mode")
+ self.smart_intersection_toggle.setStyleSheet("""
+ QCheckBox {
+ color: #03DAC5;
+ font-weight: bold;
+ font-size: 14px;
+ }
+ QCheckBox::indicator {
+ width: 20px;
+ height: 20px;
+ }
+ QCheckBox::indicator:checked {
+ background: #03DAC5;
+ border: 2px solid #03DAC5;
+ border-radius: 4px;
+ }
+ """)
+ self.smart_intersection_toggle.toggled.connect(self._toggle_smart_intersection)
+ layout.addWidget(self.smart_intersection_toggle)
+
+ layout.addSpacing(32)
+
+ # Multi-camera Toggle
+ self.multi_camera_toggle = QCheckBox("📹 Multi-Camera Fusion")
+ self.multi_camera_toggle.setStyleSheet("""
+ QCheckBox {
+ color: #e67e22;
+ font-weight: bold;
+ font-size: 14px;
+ }
+ QCheckBox::indicator {
+ width: 20px;
+ height: 20px;
+ }
+ QCheckBox::indicator:checked {
+ background: #e67e22;
+ border: 2px solid #e67e22;
+ border-radius: 4px;
+ }
+ """)
+ self.multi_camera_toggle.toggled.connect(self._toggle_multi_camera)
+        self.multi_camera_toggle.setEnabled(False)  # enabled only once Smart Intersection mode is on
+        layout.addWidget(self.multi_camera_toggle)
+
+ layout.addStretch()
+
+ # Status indicator
+ self.mode_status = QLabel("Standard Detection Mode")
+ self.mode_status.setStyleSheet("color: #bbb; font-size: 12px;")
+ layout.addWidget(self.mode_status)
+
+ return bar
+
+ def _create_file_bar(self):
+ """Create file selection bar"""
+ widget = QWidget()
+ bar = QHBoxLayout(widget)
+
self.file_btn = QPushButton()
self.file_btn.setIcon(QIcon.fromTheme("folder-video"))
self.file_btn.setText("Select Video")
@@ -57,10 +709,20 @@ class VideoDetectionTab(QWidget):
self.file_label = QLabel("No file selected")
self.file_label.setStyleSheet("color: #bbb; font-size: 13px;")
self.file_btn.clicked.connect(self._select_file)
- file_bar.addWidget(self.file_btn)
- file_bar.addWidget(self.file_label)
- file_bar.addStretch(1)
- # Video display area (centered, scalable)
+
+ bar.addWidget(self.file_btn)
+ bar.addWidget(self.file_label)
+ bar.addStretch()
+
+ return widget
+
+ def _create_single_camera_view(self):
+ """Create single camera view widget"""
+ widget = QWidget()
+ layout = QVBoxLayout(widget)
+ layout.setContentsMargins(0, 0, 0, 0)
+
+ # Video frame
video_frame = QFrame()
video_frame.setStyleSheet("""
background: #121212;
@@ -69,9 +731,11 @@ class VideoDetectionTab(QWidget):
""")
video_frame.setMinimumSize(640, 360)
video_frame.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
+
video_layout = QVBoxLayout(video_frame)
video_layout.setContentsMargins(0, 0, 0, 0)
video_layout.setAlignment(Qt.AlignCenter)
+
self.video_label = QLabel()
self.video_label.setAlignment(Qt.AlignCenter)
self.video_label.setStyleSheet("background: transparent; color: #888; font-size: 18px;")
@@ -79,31 +743,54 @@ class VideoDetectionTab(QWidget):
self.video_label.setMinimumSize(640, 360)
self.video_label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
video_layout.addWidget(self.video_label)
- # Diagnostic overlay (now below video, not over it)
- self.overlay = DiagnosticOverlay()
- self.overlay.setStyleSheet(self.overlay.styleSheet() + "border: 1px solid #03DAC5;")
- self.overlay.setFixedHeight(90)
- # FPS and Inference badges (below video)
+
+ layout.addWidget(video_frame)
+ return widget
+
+ def _create_analytics_overlay(self):
+ """Create analytics overlay that switches based on mode"""
+ container = QWidget()
+ self.overlay_layout = QVBoxLayout(container)
+ self.overlay_layout.setContentsMargins(0, 0, 0, 0)
+
+ # Standard overlay
+ self.standard_overlay = EnhancedPerformanceOverlay()
+ self.standard_overlay.setStyleSheet(self.standard_overlay.styleSheet() + "border: 1px solid #03DAC5;")
+
+ # Smart intersection overlay
+ self.smart_overlay = SmartIntersectionOverlay()
+
+ # Badge bar
+ self.badge_bar = QHBoxLayout()
+ self.badge_bar.setContentsMargins(0, 8, 0, 8)
+
self.fps_badge = QLabel("FPS: --")
self.fps_badge.setStyleSheet("background: #27ae60; color: #fff; border-radius: 12px; padding: 4px 24px; font-weight: bold; font-size: 15px;")
self.fps_badge.setAlignment(Qt.AlignCenter)
+
self.inference_badge = QLabel("Inference: -- ms")
self.inference_badge.setStyleSheet("background: #2980b9; color: #fff; border-radius: 12px; padding: 4px 24px; font-weight: bold; font-size: 15px;")
self.inference_badge.setAlignment(Qt.AlignCenter)
- # Horizontal layout for overlay and badges
- self.badge_bar = QHBoxLayout()
- self.badge_bar.setContentsMargins(0, 8, 0, 8)
+
self.badge_bar.addWidget(self.fps_badge)
self.badge_bar.addSpacing(12)
self.badge_bar.addWidget(self.inference_badge)
self.badge_bar.addSpacing(18)
- self.badge_bar.addWidget(self.overlay) # Overlay will stretch to fill right side
- self.badge_bar.addStretch(10)
- video_layout.addStretch(1) # Push badge bar to the bottom
- video_layout.addLayout(self.badge_bar)
- # Control bar (bottom)
- control_bar = QHBoxLayout()
+
+ # Add current overlay (start with standard)
+ self.current_overlay = self.standard_overlay
+ self.badge_bar.addWidget(self.current_overlay)
+ self.badge_bar.addStretch()
+
+ self.overlay_layout.addLayout(self.badge_bar)
+ return container
+
+ def _create_control_bar(self):
+ """Create control bar"""
+ widget = QWidget()
+ control_bar = QHBoxLayout(widget)
control_bar.setContentsMargins(0, 16, 0, 0)
+
# Playback controls
self.play_btn = QPushButton()
self.play_btn.setIcon(QIcon.fromTheme("media-playback-start"))
@@ -111,24 +798,30 @@ class VideoDetectionTab(QWidget):
self.play_btn.setFixedSize(48, 48)
self.play_btn.setEnabled(False)
self.play_btn.setStyleSheet(self._button_style())
+
self.pause_btn = QPushButton()
self.pause_btn.setIcon(QIcon.fromTheme("media-playback-pause"))
self.pause_btn.setToolTip("Pause")
self.pause_btn.setFixedSize(48, 48)
self.pause_btn.setEnabled(False)
self.pause_btn.setStyleSheet(self._button_style())
+
self.stop_btn = QPushButton()
self.stop_btn.setIcon(QIcon.fromTheme("media-playback-stop"))
self.stop_btn.setToolTip("Stop")
self.stop_btn.setFixedSize(48, 48)
self.stop_btn.setEnabled(False)
self.stop_btn.setStyleSheet(self._button_style())
- for btn, sig in zip([self.play_btn, self.pause_btn, self.stop_btn], [self.play_clicked.emit, self.pause_clicked.emit, self.stop_clicked.emit]):
+
+ for btn, sig in zip([self.play_btn, self.pause_btn, self.stop_btn],
+ [self.play_clicked.emit, self.pause_clicked.emit, self.stop_clicked.emit]):
btn.clicked.connect(sig)
+
control_bar.addWidget(self.play_btn)
control_bar.addWidget(self.pause_btn)
control_bar.addWidget(self.stop_btn)
control_bar.addSpacing(16)
+
# Progress bar
self.progress = QSlider(Qt.Horizontal)
self.progress.setStyleSheet("QSlider::groove:horizontal { height: 6px; background: #232323; border-radius: 3px; } QSlider::handle:horizontal { background: #03DAC5; border-radius: 8px; width: 18px; }")
@@ -136,10 +829,12 @@ class VideoDetectionTab(QWidget):
self.progress.setEnabled(False)
self.progress.valueChanged.connect(self.seek_changed.emit)
control_bar.addWidget(self.progress, 2)
+
self.timestamp = QLabel("00:00 / 00:00")
self.timestamp.setStyleSheet("color: #bbb; font-size: 13px;")
control_bar.addWidget(self.timestamp)
control_bar.addSpacing(16)
+
# Detection toggle & screenshot
self.detection_toggle = QCheckBox("Enable Detection")
self.detection_toggle.setChecked(True)
@@ -147,6 +842,7 @@ class VideoDetectionTab(QWidget):
self.detection_toggle.setEnabled(False)
self.detection_toggle.toggled.connect(self.detection_toggled.emit)
control_bar.addWidget(self.detection_toggle)
+
self.screenshot_btn = QPushButton()
self.screenshot_btn.setIcon(QIcon.fromTheme("camera-photo"))
self.screenshot_btn.setText("Screenshot")
@@ -155,15 +851,157 @@ class VideoDetectionTab(QWidget):
self.screenshot_btn.setStyleSheet(self._button_style())
self.screenshot_btn.clicked.connect(self.screenshot_clicked.emit)
control_bar.addWidget(self.screenshot_btn)
- control_bar.addStretch(1)
- # Layout grid
- grid.addLayout(file_bar, 0, 0, 1, 1)
- grid.addWidget(video_frame, 1, 0, 1, 1)
- grid.addLayout(self.badge_bar, 2, 0, 1, 1)
- grid.addLayout(control_bar, 3, 0, 1, 1)
- grid.setRowStretch(1, 1)
- self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
-
+ control_bar.addStretch()
+
+ return widget
+
+ def _create_right_panel(self):
+ """Create right panel for smart intersection controls"""
+ panel = QScrollArea()
+ panel.setWidgetResizable(True)
+ panel.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
+ panel.setStyleSheet("""
+ QScrollArea {
+ background: #1a1a1a;
+ border: 1px solid #424242;
+ border-radius: 8px;
+ }
+ """)
+
+ content = QWidget()
+ layout = QVBoxLayout(content)
+ layout.setContentsMargins(16, 16, 16, 16)
+ layout.setSpacing(16)
+
+ # Smart Intersection Controls
+ intersection_group = QGroupBox("🚦 Smart Intersection")
+ intersection_group.setStyleSheet("""
+ QGroupBox {
+ color: #03DAC5;
+ font-weight: bold;
+ font-size: 14px;
+ border: 2px solid #03DAC5;
+ border-radius: 8px;
+ margin-top: 12px;
+ padding-top: 8px;
+ }
+ QGroupBox::title {
+ subcontrol-origin: margin;
+ left: 16px;
+ padding: 0 8px 0 8px;
+ }
+ """)
+
+ intersection_layout = QVBoxLayout(intersection_group)
+
+ # Scene Analytics Toggle
+ self.scene_analytics_toggle = QCheckBox("Scene Analytics")
+ self.scene_analytics_toggle.setChecked(True)
+ self.scene_analytics_toggle.setStyleSheet("color: white; font-size: 12px;")
+ self.scene_analytics_toggle.toggled.connect(self.scene_analytics_toggled.emit)
+ intersection_layout.addWidget(self.scene_analytics_toggle)
+
+ # Multi-object tracking
+ self.multi_tracking_toggle = QCheckBox("Multi-Object Tracking")
+ self.multi_tracking_toggle.setChecked(True)
+ self.multi_tracking_toggle.setStyleSheet("color: white; font-size: 12px;")
+ intersection_layout.addWidget(self.multi_tracking_toggle)
+
+ # Speed estimation
+ self.speed_estimation_toggle = QCheckBox("Speed Estimation")
+ self.speed_estimation_toggle.setChecked(True)
+ self.speed_estimation_toggle.setStyleSheet("color: white; font-size: 12px;")
+ intersection_layout.addWidget(self.speed_estimation_toggle)
+
+ layout.addWidget(intersection_group)
+
+ # ROI Management
+ self.roi_widget = IntersectionROIWidget()
+ self.roi_widget.roi_updated.connect(self.roi_configuration_changed.emit)
+ layout.addWidget(self.roi_widget)
+
+ # Analytics Summary - Simplified
+ analytics_group = QGroupBox("📊 Quick Stats")
+ analytics_group.setStyleSheet(intersection_group.styleSheet().replace("#03DAC5", "#e67e22"))
+ analytics_layout = QVBoxLayout(analytics_group)
+
+ self.total_objects_label = QLabel("Total Objects: 0")
+ self.active_vehicles_label = QLabel("Active Vehicles: 0")
+ self.traffic_status_label = QLabel("Traffic Light: Unknown")
+
+ for label in [self.total_objects_label, self.active_vehicles_label, self.traffic_status_label]:
+ label.setStyleSheet("color: white; font-size: 12px; padding: 4px;")
+ analytics_layout.addWidget(label)
+
+ layout.addWidget(analytics_group)
+
+ # Performance Monitoring
+ perf_group = QGroupBox("⚡ Performance")
+ perf_group.setStyleSheet(intersection_group.styleSheet().replace("#03DAC5", "#9b59b6"))
+ perf_layout = QVBoxLayout(perf_group)
+
+ self.gpu_usage_label = QLabel("GPU Usage: -%")
+ self.memory_usage_label = QLabel("Memory: - MB")
+ self.processing_time_label = QLabel("Processing: - ms")
+
+ for label in [self.gpu_usage_label, self.memory_usage_label, self.processing_time_label]:
+ label.setStyleSheet("color: white; font-size: 12px;")
+ perf_layout.addWidget(label)
+
+ layout.addWidget(perf_group)
+
+ layout.addStretch()
+
+ panel.setWidget(content)
+ return panel
+
+ def _toggle_smart_intersection(self, enabled):
+ """Toggle smart intersection mode"""
+ self.smart_intersection_mode = enabled
+ self.smart_intersection_enabled.emit(enabled)
+
+ # Switch overlay
+ if enabled:
+ self._switch_to_smart_overlay()
+ self.mode_status.setText("🚦 Smart Intersection Active")
+ self.mode_status.setStyleSheet("color: #03DAC5; font-weight: bold; font-size: 12px;")
+ else:
+ self._switch_to_standard_overlay()
+ self.mode_status.setText("Standard Detection Mode")
+ self.mode_status.setStyleSheet("color: #bbb; font-size: 12px;")
+
+ # Enable/disable multi-camera toggle
+ self.multi_camera_toggle.setEnabled(enabled)
+ if not enabled:
+ self.multi_camera_toggle.setChecked(False)
+
+ def _toggle_multi_camera(self, enabled):
+ """Toggle multi-camera mode"""
+ self.multi_camera_mode = enabled
+ self.multi_camera_mode_enabled.emit(enabled)
+
+ if enabled:
+ self.video_tabs.setCurrentIndex(1) # Switch to multi-camera tab
+ self.mode_status.setText("🚦 Multi-Camera Intersection Active")
+ else:
+ self.video_tabs.setCurrentIndex(0) # Switch to single camera tab
+ if self.smart_intersection_mode:
+ self.mode_status.setText("🚦 Smart Intersection Active")
+
+ def _switch_to_smart_overlay(self):
+ """Switch to smart intersection overlay"""
+ self.badge_bar.removeWidget(self.current_overlay)
+ self.current_overlay.setParent(None)
+ self.current_overlay = self.smart_overlay
+        # Re-insert before the trailing stretch so the overlay keeps its place in the badge bar
+        self.badge_bar.insertWidget(self.badge_bar.count() - 1, self.current_overlay)
+
+ def _switch_to_standard_overlay(self):
+ """Switch to standard overlay"""
+ self.badge_bar.removeWidget(self.current_overlay)
+ self.current_overlay.setParent(None)
+        # Re-insert before the trailing stretch so the overlay keeps its place in the badge bar
+        self.badge_bar.insertWidget(self.badge_bar.count() - 1, self.current_overlay)
+ self.badge_bar.addWidget(self.current_overlay)
+
def _button_style(self):
return """
QPushButton {
@@ -190,7 +1028,7 @@ class VideoDetectionTab(QWidget):
self.video_loaded = True
self._enable_controls(True)
self.video_label.setText("")
- self.auto_select_model_device.emit() # Request auto model/device selection
+ self.auto_select_model_device.emit()
def _enable_controls(self, enabled):
self.play_btn.setEnabled(enabled)
@@ -200,13 +1038,19 @@ class VideoDetectionTab(QWidget):
self.detection_toggle.setEnabled(enabled)
self.screenshot_btn.setEnabled(enabled)
if enabled:
- self.auto_select_model_device.emit() # Also trigger auto-select when controls are enabled
+ self.auto_select_model_device.emit()
def update_display(self, pixmap):
- # Maintain aspect ratio
+ """Update display with new frame"""
if pixmap:
- scaled = pixmap.scaled(self.video_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
- self.video_label.setPixmap(scaled)
+            # Multi-camera distribution is not implemented yet; both modes update the single view for now
+            scaled = pixmap.scaled(self.video_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
+            self.video_label.setPixmap(scaled)
self._set_controls_enabled(True)
self.video_label.setStyleSheet("background: transparent; color: #888; font-size: 18px;")
else:
@@ -220,35 +1064,108 @@ class VideoDetectionTab(QWidget):
btn.setEnabled(enabled)
def update_stats(self, stats):
- # Accepts a stats dict for extensibility
- cars = stats.get('cars', 0)
- trucks = stats.get('trucks', 0)
- peds = stats.get('peds', 0)
- tlights = stats.get('tlights', 0)
- motorcycles = stats.get('motorcycles', 0)
+ """Update statistics display"""
+ if self.smart_intersection_mode:
+ # Update smart intersection overlay
+ scene_data = {
+ 'active_tracks': stats.get('total_objects', 0),
+ 'roi_events': stats.get('roi_events', 0),
+ 'cameras': {
+ 'north': stats.get('north_objects', 0),
+ 'east': stats.get('east_objects', 0),
+ 'south': stats.get('south_objects', 0),
+ 'west': stats.get('west_objects', 0)
+ },
+ 'analytics': {
+ 'crosswalk_events': stats.get('crosswalk_events', 0),
+ 'lane_events': stats.get('lane_events', 0),
+ 'safety_events': stats.get('safety_events', 0)
+ }
+ }
+ self.smart_overlay.update_smart_intersection(scene_data)
+
+ # Update right panel quick stats
+ self.total_objects_label.setText(f"Total Objects: {stats.get('total_objects', 0)}")
+ active_vehicles = stats.get('cars', 0) + stats.get('trucks', 0) + stats.get('motorcycles', 0)
+ self.active_vehicles_label.setText(f"Active Vehicles: {active_vehicles}")
+ traffic_light = stats.get('traffic_light', {})
+ if traffic_light and isinstance(traffic_light, dict):
+ color = traffic_light.get('color', 'Unknown')
+ self.traffic_status_label.setText(f"Traffic Light: {color.title()}")
+ else:
+ self.traffic_status_label.setText("Traffic Light: Unknown")
+ else:
+ # Update enhanced performance overlay
+ cars = stats.get('cars', 0)
+ trucks = stats.get('trucks', 0)
+ peds = stats.get('peds', 0)
+ tlights = stats.get('tlights', 0)
+ motorcycles = stats.get('motorcycles', 0)
+ model = stats.get('model', stats.get('model_name', '-'))
+ device = stats.get('device', stats.get('device_name', '-'))
+
+ # Update vehicle counts and system info
+ self.standard_overlay.update_overlay(model, device, cars, trucks, peds, tlights, motorcycles)
+
+ # Update performance metrics (FPS and inference time)
+ fps = stats.get('fps', None)
+ inference = stats.get('inference', stats.get('detection_time', stats.get('detection_time_ms', None)))
+ self.standard_overlay.update_performance_metrics(fps, inference)
+
+ # Update traffic light status
+ traffic_light = stats.get('traffic_light', None)
+ self.standard_overlay.update_traffic_light_status(traffic_light)
+
+ # Update right panel quick stats for standard mode too
+ total_objects = cars + trucks + peds + motorcycles + tlights
+ self.total_objects_label.setText(f"Total Objects: {total_objects}")
+ active_vehicles = cars + trucks + motorcycles
+ self.active_vehicles_label.setText(f"Active Vehicles: {active_vehicles}")
+ if traffic_light and isinstance(traffic_light, dict):
+ color = traffic_light.get('color', 'Unknown')
+ self.traffic_status_label.setText(f"Traffic Light: {color.title()}")
+ else:
+ self.traffic_status_label.setText("Traffic Light: Unknown")
+
+        # Update performance badges (FPS and inference time)
fps = stats.get('fps', None)
- # Try all possible keys for inference time
inference = stats.get('inference', stats.get('detection_time', stats.get('detection_time_ms', None)))
- model = stats.get('model', stats.get('model_name', '-'))
- device = stats.get('device', stats.get('device_name', '-'))
- # Update overlay
- self.overlay.update_overlay(model, device, cars, trucks, peds, tlights, motorcycles)
- # Update FPS and Inference badges
+
if fps is not None:
self.fps_badge.setText(f"FPS: {fps:.2f}")
else:
self.fps_badge.setText("FPS: --")
+
if inference is not None:
self.inference_badge.setText(f"Inference: {inference:.1f} ms")
else:
self.inference_badge.setText("Inference: -- ms")
+
+        # Update performance panel
+        self.gpu_usage_label.setText(f"GPU Usage: {stats.get('gpu_usage', 0):.1f}%")
+        self.memory_usage_label.setText(f"Memory: {stats.get('memory_usage', 0):.1f} MB")
+        self.processing_time_label.setText(f"Processing: {stats.get('processing_time', inference if inference is not None else 0):.1f} ms")
def update_progress(self, value, max_value, timestamp):
self.progress.setMaximum(max_value)
self.progress.setValue(value)
- # Format timestamp as string (e.g., "00:00 / 00:00" or just str)
if isinstance(timestamp, float) or isinstance(timestamp, int):
timestamp_str = f"{timestamp:.2f}"
else:
timestamp_str = str(timestamp)
self.timestamp.setText(timestamp_str)
+
+ def update_multi_camera_feed(self, camera_position, pixmap, object_count=0):
+ """Update specific camera feed in multi-camera mode"""
+ if self.multi_camera_mode:
+ self.multi_cam_widget.update_camera_feed(camera_position, pixmap, object_count)
+
+ def get_smart_intersection_config(self):
+ """Get current smart intersection configuration"""
+ return {
+ 'enabled': self.smart_intersection_mode,
+ 'multi_camera': self.multi_camera_mode,
+ 'scene_analytics': self.scene_analytics_toggle.isChecked(),
+ 'multi_tracking': self.multi_tracking_toggle.isChecked(),
+ 'speed_estimation': self.speed_estimation_toggle.isChecked()
+ }