From 6d9a27f1e8b8f7c15a3dc280eda5ef4254265de1 Mon Sep 17 00:00:00 2001 From: jatin Date: Tue, 26 Aug 2025 13:07:59 -0700 Subject: [PATCH] Final repository --- detection_openvino.py | 988 +------- detection_openvino_async.py | 1187 ++++++++- ...d_light_violation_pipeline.cpython-311.pyc | Bin 17473 -> 17490 bytes .../__pycache__/splash.cpython-311.pyc | Bin 2231 -> 2248 bytes qt_app_pyside1/config.json | 9 +- .../__pycache__/__init__.cpython-311.pyc | Bin 168 -> 197 bytes .../analytics_controller.cpython-311.pyc | Bin 17776 -> 17798 bytes .../bytetrack_tracker.cpython-311.pyc | Bin 29615 -> 29632 bytes .../__pycache__/model_manager.cpython-311.pyc | Bin 23585 -> 28982 bytes .../performance_overlay.cpython-311.pyc | Bin 3462 -> 3484 bytes ...d_light_violation_detector.cpython-311.pyc | Bin 13400 -> 13417 bytes .../video_controller_new.cpython-311.pyc | Bin 79379 -> 91203 bytes qt_app_pyside1/controllers/model_manager.py | 156 +- .../controllers/video_controller.py | 9 + .../controllers/video_controller_new.py | 2243 ++++++++++++++++- qt_app_pyside1/finale/views/settings_view.py | 2 +- qt_app_pyside1/requirements.txt | Bin 1662 -> 2480 bytes .../ui/__pycache__/__init__.cpython-311.pyc | Bin 159 -> 188 bytes .../__pycache__/analytics_tab.cpython-311.pyc | Bin 46409 -> 27416 bytes .../__pycache__/config_panel.cpython-311.pyc | Bin 37597 -> 40327 bytes .../ui/__pycache__/export_tab.cpython-311.pyc | Bin 21754 -> 21771 bytes .../global_status_panel.cpython-311.pyc | Bin 2992 -> 3014 bytes .../live_multi_cam_tab.cpython-311.pyc | Bin 16985 -> 17007 bytes .../__pycache__/main_window.cpython-311.pyc | Bin 45928 -> 84164 bytes .../performance_graphs.cpython-311.pyc | Bin 17781 -> 42801 bytes .../video_detection_tab.cpython-311.pyc | Bin 20485 -> 58687 bytes .../violations_tab.cpython-311.pyc | Bin 21117 -> 21134 bytes qt_app_pyside1/ui/analytics_tab.py | 1151 ++++----- qt_app_pyside1/ui/config_panel.py | 57 +- qt_app_pyside1/ui/main_window.py | 745 +++++- qt_app_pyside1/ui/performance_graphs.py | 790 +++++- qt_app_pyside1/ui/video_detection_tab.py | 1055 +++++++- .../__pycache__/__init__.cpython-311.pyc | Bin 226 -> 243 bytes .../annotation_utils.cpython-311.pyc | Bin 11946 -> 11968 bytes .../crosswalk_utils2.cpython-311.pyc | Bin 20617 -> 20630 bytes .../enhanced_annotation_utils.cpython-311.pyc | Bin 14929 -> 14951 bytes .../utils/__pycache__/helpers.cpython-311.pyc | Bin 10775 -> 10792 bytes .../traffic_light_utils.cpython-311.pyc | Bin 27367 -> 27389 bytes 38 files changed, 6600 insertions(+), 1792 deletions(-) diff --git a/detection_openvino.py b/detection_openvino.py index b62e000..81b3289 100644 --- a/detection_openvino.py +++ b/detection_openvino.py @@ -7,7 +7,6 @@ import cv2 import numpy as np from pathlib import Path from typing import List, Dict, Tuple, Optional -from red_light_violation_pipeline import RedLightViolationPipeline # --- Install required packages if missing --- try: @@ -50,7 +49,10 @@ COCO_CLASSES = { } # Traffic-related classes we're interested in (using standard COCO indices) -TRAFFIC_CLASS_NAMES = COCO_CLASSES +TRAFFIC_CLASS_NAMES = [ + 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', + 'traffic light', 'stop sign', 'parking meter' +] # --- Model Conversion and Quantization --- def convert_yolo_to_openvino(model_name: str = "yolo11x", half: bool = True) -> Path: @@ -76,266 +78,6 @@ def quantize_openvino_model(ov_xml: Path, model_name: str = "yolo11x") -> Path: print("Quantization requires a calibration dataset. 
Skipping actual quantization in this demo.") return ov_xml # Return FP32 if no quantization -# --- OpenVINO Inference Pipeline --- -class OpenVINOYOLODetector: - def __init__(self, model_xml: Path, device: str = "AUTO"): - self.core = ov.Core() - self.device = device - self.model = self.core.read_model(model_xml) - self.input_shape = self.model.inputs[0].shape - self.input_height = self.input_shape[2] - self.input_width = self.input_shape[3] - self.ov_config = {} - if device != "CPU": - self.model.reshape({0: [1, 3, 640, 640]}) - if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices): - self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"} - self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config) - self.output_layer = self.compiled_model.output(0) - - def preprocess(self, frame: np.ndarray) -> np.ndarray: - img = cv2.resize(frame, (self.input_width, self.input_height)) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = img.astype(np.float32) / 255.0 - img = img.transpose(2, 0, 1)[None] - return img - - def infer(self, frame: np.ndarray, conf_threshold: float = 0.25) -> List[Dict]: - input_tensor = self.preprocess(frame) - output = self.compiled_model([input_tensor])[self.output_layer] - return self.postprocess(output, frame.shape, conf_threshold) - - def postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]: - # Output: (1, 84, 8400) or (84, 8400) or (8400, 84) - if output.ndim == 3: - output = np.squeeze(output) - if output.shape[0] == 84: - output = output.T # (8400, 84) - boxes = output[:, :4] - scores = output[:, 4:] - class_ids = np.argmax(scores, axis=1) - confidences = np.max(scores, axis=1) - detections = [] - h, w = frame_shape[:2] - for i, (box, score, class_id) in enumerate(zip(boxes, confidences, class_ids)): - if score < conf_threshold: - continue - x_c, y_c, bw, bh = box - # If normalized, scale to input size - if all(0.0 <= v <= 1.0 for v in box): - x_c *= self.input_width - y_c *= self.input_height - bw *= self.input_width - bh *= self.input_height - # Scale to original frame size - scale_x = w / self.input_width - scale_y = h / self.input_height - x_c *= scale_x - y_c *= scale_y - bw *= scale_x - bh *= scale_y - x1 = int(round(x_c - bw / 2)) - y1 = int(round(y_c - bh / 2)) - x2 = int(round(x_c + bw / 2)) - y2 = int(round(y_c + bh / 2)) - x1 = max(0, min(x1, w - 1)) - y1 = max(0, min(y1, h - 1)) - x2 = max(0, min(x2, w - 1)) - y2 = max(0, min(y2, h - 1)) - if x2 <= x1 or y2 <= y1: - continue - # Only keep class 9 as traffic light, rename if found - if class_id == 9: - class_name = "traffic light" - elif class_id < len(TRAFFIC_CLASS_NAMES): - class_name = TRAFFIC_CLASS_NAMES[class_id] - else: - continue # Remove unknown/other classes - detections.append({ - 'bbox': [x1, y1, x2, y2], - 'confidence': float(score), - 'class_id': int(class_id), - 'class_name': class_name - }) - return detections - - def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray: - # 80+ visually distinct colors for COCO classes (BGR) - COCO_COLORS = [ - (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49), - (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187), - (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236), - (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199), - (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), (207, 
255, 49), - (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187), - (44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236), - (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199), - (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49), - (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187), - (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236), - (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), - (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), - (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), - (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), - (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), - (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), - (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49) - ] - for det in detections: - x1, y1, x2, y2 = det['bbox'] - label = f"{det['class_name']} {det['confidence']:.2f}" - color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)] - cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness) - cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) - return frame - -# --- Video/Image/Live Inference --- -def run_inference(detector: OpenVINOYOLODetector, source=0, conf_threshold=0.25, flip=False, use_popup=False, video_width=None): - if isinstance(source, str) and not os.path.exists(source): - print(f"Downloading sample video: {source}") - import requests - url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4" - r = requests.get(url) - with open(source, 'wb') as f: - f.write(r.content) - cap = cv2.VideoCapture(source) - if not cap.isOpened(): - print(f"Failed to open video source: {source}") - return - window_name = "YOLOv11x + OpenVINO Detection" - if use_popup: - cv2.namedWindow(window_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE) - frame_count = 0 - times = [] - while True: - ret, frame = cap.read() - if not ret: - break - if flip: - frame = cv2.flip(frame, 1) - if video_width: - scale = video_width / max(frame.shape[:2]) - frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA) - start = time.time() - detections = detector.infer(frame, conf_threshold=conf_threshold) - frame = detector.draw(frame, detections) - elapsed = time.time() - start - times.append(elapsed) - if len(times) > 200: - times.pop(0) - fps = 1.0 / np.mean(times) if times else 0 - cv2.putText(frame, f"FPS: {fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2) - if use_popup: - cv2.imshow(window_name, frame) - if cv2.waitKey(1) & 0xFF == 27: - break - else: - cv2.imshow(window_name, frame) - if cv2.waitKey(1) & 0xFF == 27: - break - frame_count += 1 - cap.release() - cv2.destroyAllWindows() - -# --- Main Entrypoint --- -if __name__ == "__main__": - # Choose model: yolo11x or yolo11n, etc. 
- MODEL_NAME = "yolo11x" - DEVICE = "AUTO" # or "CPU", "GPU" - # Step 1: Convert model if needed - ov_xml = convert_yolo_to_openvino(MODEL_NAME) - # Step 2: Quantize (optional, demo skips actual quantization) - ov_xml = quantize_openvino_model(ov_xml, MODEL_NAME) - # Step 3: Create detector - detector = OpenVINOYOLODetector(ov_xml, device=DEVICE) - # Step 4: Run on webcam, video, or image - # Webcam: source=0, Video: source="video.mp4", Image: source="image.jpg" - run_inference(detector, source=0, conf_threshold=0.25, flip=True, use_popup=True, video_width=1280) -# To run on a video file: run_inference(detector, source="people.mp4", conf_threshold=0.25) -# To run on an image: run_inference(detector, source="image.jpg", conf_threshold=0.25) -# To run async or batch, extend the OpenVINOYOLODetector class with async API as needed. - -import numpy as np -import cv2 - -def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, input_shape=(640, 640), original_shape=None): - """ - output: OpenVINO raw output tensor (e.g., shape [1, 25200, 85]) - conf_threshold: minimum confidence - iou_threshold: for NMS - input_shape: model input size (w, h) - original_shape: original image size (w, h) - """ - # 1. Squeeze batch dimension - output = np.squeeze(output) # [25200, 85] - - # 2. Split predictions - boxes = output[:, :4] - obj_conf = output[:, 4] - class_scores = output[:, 5:] - - # 3. Get class with highest score - class_ids = np.argmax(class_scores, axis=1) - class_conf = class_scores[np.arange(len(class_scores)), class_ids] - - # 4. Multiply objectness confidence with class confidence - scores = obj_conf * class_conf - - # 5. Filter by confidence threshold - mask = scores > conf_threshold - boxes = boxes[mask] - scores = scores[mask] - class_ids = class_ids[mask] - - if original_shape is not None: - # Rescale boxes from input_shape to original image shape - input_w, input_h = input_shape - orig_w, orig_h = original_shape - scale_x = orig_w / input_w - scale_y = orig_h / input_h - - boxes[:, 0] *= scale_x # x1 - boxes[:, 1] *= scale_y # y1 - boxes[:, 2] *= scale_x # x2 - boxes[:, 3] *= scale_y # y2 - - # 6. Convert boxes to [x, y, w, h] format for OpenCV NMS - boxes_xywh = [] - for box in boxes: - x1, y1, x2, y2 = box - boxes_xywh.append([x1, y1, x2 - x1, y2 - y1]) - - # 7. Apply NMS - indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold) - - # 8. 
Return filtered boxes - result_boxes = [] - result_scores = [] - result_classes = [] - if len(boxes) > 0 and len(scores) > 0: - indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold) - if len(indices) > 0: - indices = np.array(indices).flatten() - for i in indices: - i = int(i) - result_boxes.append(boxes[i]) - result_scores.append(scores[i]) - result_classes.append(class_ids[i]) - return result_boxes, result_scores, result_classes - -import os -import time -import numpy as np -import cv2 -from pathlib import Path -from typing import List, Dict, Optional - -# Only traffic-related classes for detection -TRAFFIC_CLASS_NAMES = [ - 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', - 'traffic light', 'stop sign', 'parking meter' -] - class OpenVINOVehicleDetector: def __init__(self, model_path: str = None, device: str = "AUTO", use_quantized: bool = False, enable_ocr: bool = False, confidence_threshold: float = 0.4): import openvino as ov @@ -354,8 +96,11 @@ class OpenVINOVehicleDetector: self._inference_times = [] self._start_time = time.time() self._frame_count = 0 + # Model selection logic self.model_path = self._find_best_model(model_path, use_quantized) + print(f"🎯 OpenVINOVehicleDetector: Using model: {self.model_path}") + self.core = ov.Core() self.model = self.core.read_model(self.model_path) # Always reshape to static shape before accessing .shape @@ -374,10 +119,47 @@ class OpenVINOVehicleDetector: self.output_layer = self.compiled_model.output(0) def _find_best_model(self, model_path, use_quantized): + # If a specific model path is provided, use it directly + if model_path and Path(model_path).exists(): + print(f"🎯 Using provided model path: {model_path}") + return str(model_path) + + # If no model path provided, extract model name from path or default to yolo11x + model_name = "yolo11x" # Default fallback + if model_path: + # Try to extract model name from path + path_obj = Path(model_path) + if "yolo11n" in str(path_obj).lower(): + model_name = "yolo11n" + elif "yolo11s" in str(path_obj).lower(): + model_name = "yolo11s" + elif "yolo11m" in str(path_obj).lower(): + model_name = "yolo11m" + elif "yolo11l" in str(path_obj).lower(): + model_name = "yolo11l" + elif "yolo11x" in str(path_obj).lower(): + model_name = "yolo11x" + + print(f"🔍 Searching for {model_name} model files...") + # Priority: quantized IR > IR > .pt search_paths = [ - Path(model_path) if model_path else None, - Path("yolo11x_openvino_int8_model/yolo11x.xml") if use_quantized else None, + Path(f"{model_name}_openvino_int8_model/{model_name}.xml") if use_quantized else None, + Path(f"{model_name}_openvino_model/{model_name}.xml"), + Path(f"rcb/{model_name}_openvino_model/{model_name}.xml"), + Path(f"{model_name}.xml"), + Path(f"rcb/{model_name}.xml"), + Path(f"{model_name}.pt"), + Path(f"rcb/{model_name}.pt") + ] + + for p in search_paths: + if p and p.exists(): + print(f"✅ Found model: {p}") + return str(p) + + # Fallback to any yolo11x if specific model not found + fallback_paths = [ Path("yolo11x_openvino_model/yolo11x.xml"), Path("rcb/yolo11x_openvino_model/yolo11x.xml"), Path("yolo11x.xml"), @@ -385,10 +167,13 @@ class OpenVINOVehicleDetector: Path("yolo11x.pt"), Path("rcb/yolo11x.pt") ] - for p in search_paths: + + for p in fallback_paths: if p and p.exists(): + print(f"⚠️ Using fallback model: {p}") return str(p) - raise FileNotFoundError("No suitable YOLOv11x model found for OpenVINO.") + + raise FileNotFoundError(f"No suitable {model_name} model found for OpenVINO.") 
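
The resolution order above is: an explicit model_path is used as-is if it exists, then the IR directories matching the model name inferred from that path, then any yolo11x artifact as a last resort. A minimal usage sketch exercising only the public calls from this patch (OpenVINOVehicleDetector, detect_vehicles, draw, get_device); the yolo11n IR location and the image path are placeholders, not files guaranteed by the repo, and it assumes the script is run from the repo root:

import cv2
from detection_openvino import OpenVINOVehicleDetector

# Explicit path: used directly if it exists, no search performed.
detector = OpenVINOVehicleDetector(
    model_path="yolo11n_openvino_model/yolo11n.xml",  # placeholder IR location
    device="AUTO",
    confidence_threshold=0.4,
)

# No path: falls back to the yolo11x search list (IR dirs, rcb/, .pt).
# detector = OpenVINOVehicleDetector(device="CPU")

frame = cv2.imread("sample.jpg")  # placeholder image
detections = detector.detect_vehicles(frame, conf_threshold=0.4)
annotated = detector.draw(frame, detections)
cv2.imwrite("sample_annotated.jpg", annotated)
print(f"{len(detections)} detections on {detector.get_device()}")
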
def detect_vehicles(self, frame: np.ndarray, conf_threshold: float = None) -> List[Dict]: if conf_threshold is None: @@ -397,9 +182,9 @@ class OpenVINOVehicleDetector: input_tensor = self._preprocess(frame) output = self.compiled_model([input_tensor])[self.output_layer] # Debug: print raw output shape - print(f"[DEBUG] Model output shape: {output.shape}") + # print(f"[DEBUG] Model output shape: {output.shape}") detections = self._postprocess(output, frame.shape, conf_threshold) - print(f"[DEBUG] Detections after postprocess: {len(detections)}") + # print(f"[DEBUG] Detections after postprocess: {len(detections)}") elapsed = time.time() - start self._inference_times.append(elapsed) self._frame_count += 1 @@ -471,7 +256,7 @@ class OpenVINOVehicleDetector: 'class_id': int(class_id), 'class_name': class_name }) - print(f"[DEBUG] Raw detections before NMS: {len(detections)}") + # print(f"[DEBUG] Raw detections before NMS: {len(detections)}") # Apply NMS if len(detections) > 0: boxes = np.array([det['bbox'] for det in detections]) @@ -484,7 +269,7 @@ class OpenVINOVehicleDetector: else: indices = [] detections = [detections[int(i)] for i in indices] if len(indices) > 0 else [] - print(f"[DEBUG] Detections after NMS: {len(detections)}") + # print(f"[DEBUG] Detections after NMS: {len(detections)}") return detections def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray: @@ -516,661 +301,12 @@ class OpenVINOVehicleDetector: cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness) cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) return frame + + def get_device(self): + """Get the device being used for inference""" + return self.device -# --- Video/Image/Live Inference --- -def run_inference(detector: OpenVINOYOLODetector, source=0, conf_threshold=0.25, flip=False, use_popup=False, video_width=None): - if isinstance(source, str) and not os.path.exists(source): - print(f"Downloading sample video: {source}") - import requests - url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4" - r = requests.get(url) - with open(source, 'wb') as f: - f.write(r.content) - cap = cv2.VideoCapture(source) - if not cap.isOpened(): - print(f"Failed to open video source: {source}") - return - window_name = "YOLOv11x + OpenVINO Detection" - if use_popup: - cv2.namedWindow(window_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE) - frame_count = 0 - times = [] - while True: - ret, frame = cap.read() - if not ret: - break - if flip: - frame = cv2.flip(frame, 1) - if video_width: - scale = video_width / max(frame.shape[:2]) - frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA) - start = time.time() - detections = detector.infer(frame, conf_threshold=conf_threshold) - frame = detector.draw(frame, detections) - elapsed = time.time() - start - times.append(elapsed) - if len(times) > 200: - times.pop(0) - fps = 1.0 / np.mean(times) if times else 0 - cv2.putText(frame, f"FPS: {fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2) - if use_popup: - cv2.imshow(window_name, frame) - if cv2.waitKey(1) & 0xFF == 27: - break - else: - cv2.imshow(window_name, frame) - if cv2.waitKey(1) & 0xFF == 27: - break - frame_count += 1 - cap.release() - cv2.destroyAllWindows() - -# --- Main Entrypoint --- if __name__ == "__main__": - # Choose model: yolo11x or yolo11n, etc. 
- MODEL_NAME = "yolo11x" - - DEVICE = "AUTO" # or "CPU", "GPU" - # Step 1: Convert model if needed - ov_xml = convert_yolo_to_openvino(MODEL_NAME) - # Step 2: Quantize (optional, demo skips actual quantization) - ov_xml = quantize_openvino_model(ov_xml, MODEL_NAME) - # Step 3: Create detector - detector = OpenVINOYOLODetector(ov_xml, device=DEVICE) - # Step 4: Run on webcam, video, or image - # Webcam: source=0, Video: source="video.mp4", Image: source="image.jpg" - run_inference(detector, source=0, conf_threshold=0.25, flip=True, use_popup=True, video_width=1280) -# To run on a video file: run_inference(detector, source="people.mp4", conf_threshold=0.25) -# To run on an image: run_inference(detector, source="image.jpg", conf_threshold=0.25) -# To run async or batch, extend the OpenVINOYOLODetector class with async API as needed. - -import numpy as np -import cv2 - -def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, input_shape=(640, 640), original_shape=None): - """ - output: OpenVINO raw output tensor (e.g., shape [1, 25200, 85]) - conf_threshold: minimum confidence - iou_threshold: for NMS - input_shape: model input size (w, h) - original_shape: original image size (w, h) - """ - # 1. Squeeze batch dimension - output = np.squeeze(output) # [25200, 85] - - # 2. Split predictions - boxes = output[:, :4] - obj_conf = output[:, 4] - class_scores = output[:, 5:] - - # 3. Get class with highest score - class_ids = np.argmax(class_scores, axis=1) - class_conf = class_scores[np.arange(len(class_scores)), class_ids] - - # 4. Multiply objectness confidence with class confidence - scores = obj_conf * class_conf - - # 5. Filter by confidence threshold - mask = scores > conf_threshold - boxes = boxes[mask] - scores = scores[mask] - class_ids = class_ids[mask] - - if original_shape is not None: - # Rescale boxes from input_shape to original image shape - input_w, input_h = input_shape - orig_w, orig_h = original_shape - scale_x = orig_w / input_w - scale_y = orig_h / input_h - - boxes[:, 0] *= scale_x # x1 - boxes[:, 1] *= scale_y # y1 - boxes[:, 2] *= scale_x # x2 - boxes[:, 3] *= scale_y # y2 - - # 6. Convert boxes to [x, y, w, h] format for OpenCV NMS - boxes_xywh = [] - for box in boxes: - x1, y1, x2, y2 = box - boxes_xywh.append([x1, y1, x2 - x1, y2 - y1]) - - # 7. Apply NMS - indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold) - - # 8. 
Return filtered boxes - result_boxes = [] - result_scores = [] - result_classes = [] - if len(boxes) > 0 and len(scores) > 0: - indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold) - if len(indices) > 0: - indices = np.array(indices).flatten() - for i in indices: - i = int(i) - result_boxes.append(boxes[i]) - result_scores.append(scores[i]) - result_classes.append(class_ids[i]) - return result_boxes, result_scores, result_classes - -import os -import time -import numpy as np -import cv2 -from pathlib import Path -from typing import List, Dict, Optional - -# Only traffic-related classes for detection -TRAFFIC_CLASS_NAMES = [ - 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', - 'traffic light', 'stop sign', 'parking meter' -] - -class OpenVINOVehicleDetector: - def __init__(self, model_path: str = None, device: str = "AUTO", use_quantized: bool = False, enable_ocr: bool = False, confidence_threshold: float = 0.4): - import openvino as ov - self.device = device - self.confidence_threshold = confidence_threshold - self.ocr_reader = None - self.class_names = TRAFFIC_CLASS_NAMES - self.performance_stats = { - 'fps': 0, - 'avg_inference_time': 0, - 'frames_processed': 0, - 'backend': f"OpenVINO-{device}", - 'total_detections': 0, - 'detection_rate': 0 - } - self._inference_times = [] - self._start_time = time.time() - self._frame_count = 0 - # Model selection logic - self.model_path = self._find_best_model(model_path, use_quantized) - self.core = ov.Core() - self.model = self.core.read_model(self.model_path) - # Always reshape to static shape before accessing .shape - self.model.reshape({0: [1, 3, 640, 640]}) - self.input_shape = self.model.inputs[0].shape - self.input_height = self.input_shape[2] - self.input_width = self.input_shape[3] - self.ov_config = {} - if device != "CPU": - # Already reshaped above, so nothing more needed here - pass - if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices): - self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"} - self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config) - - self.output_layer = self.compiled_model.output(0) - - def _find_best_model(self, model_path, use_quantized): - # Priority: quantized IR > IR > .pt - search_paths = [ - Path(model_path) if model_path else None, - Path("yolo11x_openvino_int8_model/yolo11x.xml") if use_quantized else None, - Path("yolo11x_openvino_model/yolo11x.xml"), - Path("rcb/yolo11x_openvino_model/yolo11x.xml"), - Path("yolo11x.xml"), - Path("rcb/yolo11x.xml"), - Path("yolo11x.pt"), - Path("rcb/yolo11x.pt") - ] - for p in search_paths: - if p and p.exists(): - return str(p) - raise FileNotFoundError("No suitable YOLOv11x model found for OpenVINO.") - - def detect_vehicles(self, frame: np.ndarray, conf_threshold: float = None) -> List[Dict]: - if conf_threshold is None: - conf_threshold = 0.1 # Lowered for debugging - start = time.time() - input_tensor = self._preprocess(frame) - output = self.compiled_model([input_tensor])[self.output_layer] - # Debug: print raw output shape - print(f"[DEBUG] Model output shape: {output.shape}") - detections = self._postprocess(output, frame.shape, conf_threshold) - print(f"[DEBUG] Detections after postprocess: {len(detections)}") - elapsed = time.time() - start - self._inference_times.append(elapsed) - self._frame_count += 1 - self.performance_stats['frames_processed'] = self._frame_count - self.performance_stats['total_detections'] += len(detections) 
- if len(self._inference_times) > 100: - self._inference_times.pop(0) - self.performance_stats['avg_inference_time'] = float(np.mean(self._inference_times)) if self._inference_times else 0 - total_time = time.time() - self._start_time - self.performance_stats['fps'] = self._frame_count / total_time if total_time > 0 else 0 - return detections - - def _preprocess(self, frame: np.ndarray) -> np.ndarray: - img = cv2.resize(frame, (self.input_width, self.input_height)) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = img.astype(np.float32) / 255.0 - img = img.transpose(2, 0, 1)[None] - return img - - def _postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]: - # Output: (1, 84, 8400) or (84, 8400) or (8400, 84) - if output.ndim == 3: - output = np.squeeze(output) - if output.shape[0] == 84: - output = output.T # (8400, 84) - boxes = output[:, :4] - scores = output[:, 4:] - class_ids = np.argmax(scores, axis=1) - confidences = np.max(scores, axis=1) - detections = [] - h, w = frame_shape[:2] - for i, (box, score, class_id) in enumerate(zip(boxes, confidences, class_ids)): - if score < conf_threshold: - continue - x_c, y_c, bw, bh = box - # If normalized, scale to input size - if all(0.0 <= v <= 1.0 for v in box): - x_c *= self.input_width - y_c *= self.input_height - bw *= self.input_width - bh *= self.input_height - # Scale to original frame size - scale_x = w / self.input_width - scale_y = h / self.input_height - x_c *= scale_x - y_c *= scale_y - bw *= scale_x - bh *= scale_y - x1 = int(round(x_c - bw / 2)) - y1 = int(round(y_c - bh / 2)) - x2 = int(round(x_c + bw / 2)) - y2 = int(round(y_c + bh / 2)) - x1 = max(0, min(x1, w - 1)) - y1 = max(0, min(y1, h - 1)) - x2 = max(0, min(x2, w - 1)) - y2 = max(0, min(y2, h - 1)) - if x2 <= x1 or y2 <= y1: - continue - # Only keep class 9 as traffic light, rename if found - if class_id == 9: - class_name = "traffic light" - elif class_id < len(TRAFFIC_CLASS_NAMES): - class_name = TRAFFIC_CLASS_NAMES[class_id] - else: - continue # Remove unknown/other classes - detections.append({ - 'bbox': [x1, y1, x2, y2], - 'confidence': float(score), - 'class_id': int(class_id), - 'class_name': class_name - }) - print(f"[DEBUG] Raw detections before NMS: {len(detections)}") - # Apply NMS - if len(detections) > 0: - boxes = np.array([det['bbox'] for det in detections]) - scores = np.array([det['confidence'] for det in detections]) - indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), conf_threshold, 0.5) - if isinstance(indices, (list, tuple)) and len(indices) > 0: - indices = np.array(indices).flatten() - elif isinstance(indices, np.ndarray) and indices.size > 0: - indices = indices.flatten() - else: - indices = [] - detections = [detections[int(i)] for i in indices] if len(indices) > 0 else [] - print(f"[DEBUG] Detections after NMS: {len(detections)}") - return detections - - def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray: - # 80+ visually distinct colors for COCO classes (BGR) - COCO_COLORS = [ - (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49), - (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187), - (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236), - (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199), - (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), (207, 255, 49), - (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187), - 
(44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236), - (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199), - (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49), - (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187), - (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236), - (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), - (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), - (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), - (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), - (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), - (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), - (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49) - ] - for det in detections: - x1, y1, x2, y2 = det['bbox'] - label = f"{det['class_name']} {det['confidence']:.2f}" - color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)] - cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness) - cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) - return frame - -# --- Video/Image/Live Inference --- -def run_inference(detector: OpenVINOYOLODetector, source=0, conf_threshold=0.25, flip=False, use_popup=False, video_width=None): - if isinstance(source, str) and not os.path.exists(source): - print(f"Downloading sample video: {source}") - import requests - url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4" - r = requests.get(url) - with open(source, 'wb') as f: - f.write(r.content) - cap = cv2.VideoCapture(source) - if not cap.isOpened(): - print(f"Failed to open video source: {source}") - return - window_name = "YOLOv11x + OpenVINO Detection" - if use_popup: - cv2.namedWindow(window_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE) - frame_count = 0 - times = [] - while True: - ret, frame = cap.read() - if not ret: - break - if flip: - frame = cv2.flip(frame, 1) - if video_width: - scale = video_width / max(frame.shape[:2]) - frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA) - start = time.time() - detections = detector.infer(frame, conf_threshold=conf_threshold) - frame = detector.draw(frame, detections) - elapsed = time.time() - start - times.append(elapsed) - if len(times) > 200: - times.pop(0) - fps = 1.0 / np.mean(times) if times else 0 - cv2.putText(frame, f"FPS: {fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2) - if use_popup: - cv2.imshow(window_name, frame) - if cv2.waitKey(1) & 0xFF == 27: - break - else: - cv2.imshow(window_name, frame) - if cv2.waitKey(1) & 0xFF == 27: - break - frame_count += 1 - cap.release() - cv2.destroyAllWindows() - -# --- Main Entrypoint --- -if __name__ == "__main__": - # Choose model: yolo11x or yolo11n, etc. 
- MODEL_NAME = "yolo11x" - - DEVICE = "AUTO" # or "CPU", "GPU" - # Step 1: Convert model if needed - ov_xml = convert_yolo_to_openvino(MODEL_NAME) - # Step 2: Quantize (optional, demo skips actual quantization) - ov_xml = quantize_openvino_model(ov_xml, MODEL_NAME) - # Step 3: Create detector - detector = OpenVINOYOLODetector(ov_xml, device=DEVICE) - # Step 4: Run on webcam, video, or image - # Webcam: source=0, Video: source="video.mp4", Image: source="image.jpg" - run_inference(detector, source=0, conf_threshold=0.25, flip=True, use_popup=True, video_width=1280) -# To run on a video file: run_inference(detector, source="people.mp4", conf_threshold=0.25) -# To run on an image: run_inference(detector, source="image.jpg", conf_threshold=0.25) -# To run async or batch, extend the OpenVINOYOLODetector class with async API as needed. - -import numpy as np -import cv2 - -def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, input_shape=(640, 640), original_shape=None): - """ - output: OpenVINO raw output tensor (e.g., shape [1, 25200, 85]) - conf_threshold: minimum confidence - iou_threshold: for NMS - input_shape: model input size (w, h) - original_shape: original image size (w, h) - """ - # 1. Squeeze batch dimension - output = np.squeeze(output) # [25200, 85] - - # 2. Split predictions - boxes = output[:, :4] - obj_conf = output[:, 4] - class_scores = output[:, 5:] - - # 3. Get class with highest score - class_ids = np.argmax(class_scores, axis=1) - class_conf = class_scores[np.arange(len(class_scores)), class_ids] - - # 4. Multiply objectness confidence with class confidence - scores = obj_conf * class_conf - - # 5. Filter by confidence threshold - mask = scores > conf_threshold - boxes = boxes[mask] - scores = scores[mask] - class_ids = class_ids[mask] - - if original_shape is not None: - # Rescale boxes from input_shape to original image shape - input_w, input_h = input_shape - orig_w, orig_h = original_shape - scale_x = orig_w / input_w - scale_y = orig_h / input_h - - boxes[:, 0] *= scale_x # x1 - boxes[:, 1] *= scale_y # y1 - boxes[:, 2] *= scale_x # x2 - boxes[:, 3] *= scale_y # y2 - - # 6. Convert boxes to [x, y, w, h] format for OpenCV NMS - boxes_xywh = [] - for box in boxes: - x1, y1, x2, y2 = box - boxes_xywh.append([x1, y1, x2 - x1, y2 - y1]) - - # 7. Apply NMS - indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold) - - # 8. 
Return filtered boxes - result_boxes = [] - result_scores = [] - result_classes = [] - if len(boxes) > 0 and len(scores) > 0: - indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold) - if len(indices) > 0: - indices = np.array(indices).flatten() - for i in indices: - i = int(i) - result_boxes.append(boxes[i]) - result_scores.append(scores[i]) - result_classes.append(class_ids[i]) - return result_boxes, result_scores, result_classes - -import os -import time -import numpy as np -import cv2 -from pathlib import Path -from typing import List, Dict, Optional - -# Only traffic-related classes for detection -TRAFFIC_CLASS_NAMES = [ - 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', - 'traffic light', 'stop sign', 'parking meter' -] - -class OpenVINOVehicleDetector: - def __init__(self, model_path: str = None, device: str = "AUTO", use_quantized: bool = False, enable_ocr: bool = False, confidence_threshold: float = 0.4): - import openvino as ov - self.device = device - self.confidence_threshold = confidence_threshold - self.ocr_reader = None - self.class_names = TRAFFIC_CLASS_NAMES - self.performance_stats = { - 'fps': 0, - 'avg_inference_time': 0, - 'frames_processed': 0, - 'backend': f"OpenVINO-{device}", - 'total_detections': 0, - 'detection_rate': 0 - } - self._inference_times = [] - self._start_time = time.time() - self._frame_count = 0 - # Model selection logic - self.model_path = self._find_best_model(model_path, use_quantized) - self.core = ov.Core() - self.model = self.core.read_model(self.model_path) - # Always reshape to static shape before accessing .shape - self.model.reshape({0: [1, 3, 640, 640]}) - self.input_shape = self.model.inputs[0].shape - self.input_height = self.input_shape[2] - self.input_width = self.input_shape[3] - self.ov_config = {} - if device != "CPU": - # Already reshaped above, so nothing more needed here - pass - if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices): - self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"} - self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config) - - self.output_layer = self.compiled_model.output(0) - - def _find_best_model(self, model_path, use_quantized): - # Priority: quantized IR > IR > .pt - search_paths = [ - Path(model_path) if model_path else None, - Path("yolo11x_openvino_int8_model/yolo11x.xml") if use_quantized else None, - Path("yolo11x_openvino_model/yolo11x.xml"), - Path("rcb/yolo11x_openvino_model/yolo11x.xml"), - Path("yolo11x.xml"), - Path("rcb/yolo11x.xml"), - Path("yolo11x.pt"), - Path("rcb/yolo11x.pt") - ] - for p in search_paths: - if p and p.exists(): - return str(p) - raise FileNotFoundError("No suitable YOLOv11x model found for OpenVINO.") - - def detect_vehicles(self, frame: np.ndarray, conf_threshold: float = None) -> List[Dict]: - if conf_threshold is None: - conf_threshold = 0.1 # Lowered for debugging - start = time.time() - input_tensor = self._preprocess(frame) - output = self.compiled_model([input_tensor])[self.output_layer] - # Debug: print raw output shape - print(f"[DEBUG] Model output shape: {output.shape}") - detections = self._postprocess(output, frame.shape, conf_threshold) - print(f"[DEBUG] Detections after postprocess: {len(detections)}") - elapsed = time.time() - start - self._inference_times.append(elapsed) - self._frame_count += 1 - self.performance_stats['frames_processed'] = self._frame_count - self.performance_stats['total_detections'] += len(detections) 
- if len(self._inference_times) > 100: - self._inference_times.pop(0) - self.performance_stats['avg_inference_time'] = float(np.mean(self._inference_times)) if self._inference_times else 0 - total_time = time.time() - self._start_time - self.performance_stats['fps'] = self._frame_count / total_time if total_time > 0 else 0 - return detections - - def _preprocess(self, frame: np.ndarray) -> np.ndarray: - img = cv2.resize(frame, (self.input_width, self.input_height)) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = img.astype(np.float32) / 255.0 - img = img.transpose(2, 0, 1)[None] - return img - - def _postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]: - # Output: (1, 84, 8400) or (84, 8400) or (8400, 84) - if output.ndim == 3: - output = np.squeeze(output) - if output.shape[0] == 84: - output = output.T # (8400, 84) - boxes = output[:, :4] - scores = output[:, 4:] - class_ids = np.argmax(scores, axis=1) - confidences = np.max(scores, axis=1) - detections = [] - h, w = frame_shape[:2] - for i, (box, score, class_id) in enumerate(zip(boxes, confidences, class_ids)): - if score < conf_threshold: - continue - x_c, y_c, bw, bh = box - # If normalized, scale to input size - if all(0.0 <= v <= 1.0 for v in box): - x_c *= self.input_width - y_c *= self.input_height - bw *= self.input_width - bh *= self.input_height - # Scale to original frame size - scale_x = w / self.input_width - scale_y = h / self.input_height - x_c *= scale_x - y_c *= scale_y - bw *= scale_x - bh *= scale_y - x1 = int(round(x_c - bw / 2)) - y1 = int(round(y_c - bh / 2)) - x2 = int(round(x_c + bw / 2)) - y2 = int(round(y_c + bh / 2)) - x1 = max(0, min(x1, w - 1)) - y1 = max(0, min(y1, h - 1)) - x2 = max(0, min(x2, w - 1)) - y2 = max(0, min(y2, h - 1)) - if x2 <= x1 or y2 <= y1: - continue - # Only keep class 9 as traffic light, rename if found - if class_id == 9: - class_name = "traffic light" - elif class_id < len(TRAFFIC_CLASS_NAMES): - class_name = TRAFFIC_CLASS_NAMES[class_id] - else: - continue # Remove unknown/other classes - detections.append({ - 'bbox': [x1, y1, x2, y2], - 'confidence': float(score), - 'class_id': int(class_id), - 'class_name': class_name - }) - print(f"[DEBUG] Raw detections before NMS: {len(detections)}") - # Apply NMS - if len(detections) > 0: - boxes = np.array([det['bbox'] for det in detections]) - scores = np.array([det['confidence'] for det in detections]) - indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), conf_threshold, 0.5) - if isinstance(indices, (list, tuple)) and len(indices) > 0: - indices = np.array(indices).flatten() - elif isinstance(indices, np.ndarray) and indices.size > 0: - indices = indices.flatten() - else: - indices = [] - detections = [detections[int(i)] for i in indices] if len(indices) > 0 else [] - print(f"[DEBUG] Detections after NMS: {len(detections)}") - return detections - - def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray: - # 80+ visually distinct colors for COCO classes (BGR) - COCO_COLORS = [ - (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49), - (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187), - (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236), - (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199), - (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), (207, 255, 49), - (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187), - 
(44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236), - (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199), - (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49), - (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187), - (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236), - (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), - (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), - (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), - (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), - (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), - (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), - (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49) - ] - for det in detections: - x1, y1, x2, y2 = det['bbox'] - label = f"{det['class_name']} {det['confidence']:.2f}" - color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)] - cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness) - cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) - return frame \ No newline at end of file + # Test the detector with YOLOv11n model + detector = OpenVINOVehicleDetector(model_path="yolo11n_openvino_model/yolo11n.xml") + print(f"Detector initialized with model: {detector.model_path}") diff --git a/detection_openvino_async.py b/detection_openvino_async.py index 6ea61ad..3c99641 100644 --- a/detection_openvino_async.py +++ b/detection_openvino_async.py @@ -1691,4 +1691,1189 @@ def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, inp result_boxes.append(boxes[i]) result_scores.append(scores[i]) result_classes.append(class_ids[i]) - return result_boxes, result_scores, result_classes \ No newline at end of file + return result_boxes, result_scores, result_classes + + + + + + + + + +# Detection logic using OpenVINO models (YOLO, etc.) 
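
The block appended below mirrors the synchronous detector from detection_openvino.py; the asynchronous path that gives this file its name sits outside this hunk. As a rough sketch only, assuming OpenVINO's AsyncInferQueue API and none of the repo's own async helpers (all names here are illustrative), overlapping inference on a video could look like this:

import cv2
import numpy as np
import openvino as ov

def run_async_demo(model_xml="yolo11x_openvino_model/yolo11x.xml", source="people.mp4", jobs=4):
    core = ov.Core()
    model = core.read_model(model_xml)
    model.reshape({0: [1, 3, 640, 640]})
    compiled = core.compile_model(model, "AUTO")
    queue = ov.AsyncInferQueue(compiled, jobs)
    results = {}

    def on_done(request, frame_id):
        # Keep the raw output per frame for postprocessing/drawing on the main thread.
        results[frame_id] = request.get_output_tensor(0).data.copy()

    queue.set_callback(on_done)

    cap = cv2.VideoCapture(source)
    frame_id = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        img = cv2.cvtColor(cv2.resize(frame, (640, 640)), cv2.COLOR_BGR2RGB)
        tensor = img.astype(np.float32).transpose(2, 0, 1)[None] / 255.0
        # start_async returns immediately; the callback fires when this request completes.
        queue.start_async({0: tensor}, frame_id)
        frame_id += 1
    queue.wait_all()
    cap.release()
    return results
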
+ +import os +import sys +import time +import cv2 +import numpy as np +from pathlib import Path +from typing import List, Dict, Tuple, Optional +from red_light_violation_pipeline import RedLightViolationPipeline + +# --- Install required packages if missing --- +try: + import openvino as ov +except ImportError: + print("Installing openvino...") + os.system('pip install --quiet "openvino>=2024.0.0"') + import openvino as ov +try: + from ultralytics import YOLO +except ImportError: + print("Installing ultralytics...") + os.system('pip install --quiet "ultralytics==8.3.0"') + from ultralytics import YOLO +try: + import nncf +except ImportError: + print("Installing nncf...") + os.system('pip install --quiet "nncf>=2.9.0"') + import nncf + +# --- COCO dataset class names --- +COCO_CLASSES = { + 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', + 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', + 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', + 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', + 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', + 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', + 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', + 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', + 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', + 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', + 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', + 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', + 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', + 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', + 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', + 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush' +} + +# Traffic-related classes we're interested in (using standard COCO indices) +TRAFFIC_CLASS_NAMES = COCO_CLASSES + +# --- Model Conversion and Quantization --- +def convert_yolo_to_openvino(model_name: str = "yolo11x", half: bool = True) -> Path: + """Convert YOLOv11x PyTorch model to OpenVINO IR format.""" + pt_path = Path(f"{model_name}.pt") + ov_dir = Path(f"{model_name}_openvino_model") + ov_xml = ov_dir / f"{model_name}.xml" + if not ov_xml.exists(): + print(f"Exporting {pt_path} to OpenVINO IR...") + model = YOLO(str(pt_path)) + model.export(format="openvino", dynamic=True, half=half) + else: + print(f"OpenVINO IR already exists: {ov_xml}") + return ov_xml + +def quantize_openvino_model(ov_xml: Path, model_name: str = "yolo11x") -> Path: + """Quantize OpenVINO IR model to INT8 using NNCF.""" + int8_dir = Path(f"{model_name}_openvino_int8_model") + int8_xml = int8_dir / f"{model_name}.xml" + if int8_xml.exists(): + print(f"INT8 model already exists: {int8_xml}") + return int8_xml + print("Quantization requires a calibration dataset. 
Skipping actual quantization in this demo.") + return ov_xml # Return FP32 if no quantization + +# --- OpenVINO Inference Pipeline --- +class OpenVINOYOLODetector: + def __init__(self, model_xml: Path, device: str = "AUTO"): + self.core = ov.Core() + self.device = device + self.model = self.core.read_model(model_xml) + self.input_shape = self.model.inputs[0].shape + self.input_height = self.input_shape[2] + self.input_width = self.input_shape[3] + self.ov_config = {} + if device != "CPU": + self.model.reshape({0: [1, 3, 640, 640]}) + if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices): + self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"} + self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config) + self.output_layer = self.compiled_model.output(0) + + def preprocess(self, frame: np.ndarray) -> np.ndarray: + img = cv2.resize(frame, (self.input_width, self.input_height)) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = img.astype(np.float32) / 255.0 + img = img.transpose(2, 0, 1)[None] + return img + + def infer(self, frame: np.ndarray, conf_threshold: float = 0.25) -> List[Dict]: + input_tensor = self.preprocess(frame) + output = self.compiled_model([input_tensor])[self.output_layer] + return self.postprocess(output, frame.shape, conf_threshold) + + def postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]: + # Output: (1, 84, 8400) or (84, 8400) or (8400, 84) + if output.ndim == 3: + output = np.squeeze(output) + if output.shape[0] == 84: + output = output.T # (8400, 84) + boxes = output[:, :4] + scores = output[:, 4:] + class_ids = np.argmax(scores, axis=1) + confidences = np.max(scores, axis=1) + detections = [] + h, w = frame_shape[:2] + for i, (box, score, class_id) in enumerate(zip(boxes, confidences, class_ids)): + if score < conf_threshold: + continue + x_c, y_c, bw, bh = box + # If normalized, scale to input size + if all(0.0 <= v <= 1.0 for v in box): + x_c *= self.input_width + y_c *= self.input_height + bw *= self.input_width + bh *= self.input_height + # Scale to original frame size + scale_x = w / self.input_width + scale_y = h / self.input_height + x_c *= scale_x + y_c *= scale_y + bw *= scale_x + bh *= scale_y + x1 = int(round(x_c - bw / 2)) + y1 = int(round(y_c - bh / 2)) + x2 = int(round(x_c + bw / 2)) + y2 = int(round(y_c + bh / 2)) + x1 = max(0, min(x1, w - 1)) + y1 = max(0, min(y1, h - 1)) + x2 = max(0, min(x2, w - 1)) + y2 = max(0, min(y2, h - 1)) + if x2 <= x1 or y2 <= y1: + continue + # Only keep class 9 as traffic light, rename if found + if class_id == 9: + class_name = "traffic light" + elif class_id < len(TRAFFIC_CLASS_NAMES): + class_name = TRAFFIC_CLASS_NAMES[class_id] + else: + continue # Remove unknown/other classes + detections.append({ + 'bbox': [x1, y1, x2, y2], + 'confidence': float(score), + 'class_id': int(class_id), + 'class_name': class_name + }) + return detections + + def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray: + # 80+ visually distinct colors for COCO classes (BGR) + COCO_COLORS = [ + (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49), + (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187), + (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236), + (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199), + (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), 
(207, 255, 49), + (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187), + (44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236), + (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199), + (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49), + (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187), + (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236), + (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), + (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), + (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), + (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), + (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), + (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), + (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49) + ] + for det in detections: + x1, y1, x2, y2 = det['bbox'] + label = f"{det['class_name']} {det['confidence']:.2f}" + color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)] + cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness) + cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) + return frame + +# --- Video/Image/Live Inference --- +def run_inference(detector: OpenVINOYOLODetector, source=0, conf_threshold=0.25, flip=False, use_popup=False, video_width=None): + if isinstance(source, str) and not os.path.exists(source): + print(f"Downloading sample video: {source}") + import requests + url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4" + r = requests.get(url) + with open(source, 'wb') as f: + f.write(r.content) + cap = cv2.VideoCapture(source) + if not cap.isOpened(): + print(f"Failed to open video source: {source}") + return + window_name = "YOLOv11x + OpenVINO Detection" + if use_popup: + cv2.namedWindow(window_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE) + frame_count = 0 + times = [] + while True: + ret, frame = cap.read() + if not ret: + break + if flip: + frame = cv2.flip(frame, 1) + if video_width: + scale = video_width / max(frame.shape[:2]) + frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA) + start = time.time() + detections = detector.infer(frame, conf_threshold=conf_threshold) + frame = detector.draw(frame, detections) + elapsed = time.time() - start + times.append(elapsed) + if len(times) > 200: + times.pop(0) + fps = 1.0 / np.mean(times) if times else 0 + cv2.putText(frame, f"FPS: {fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2) + if use_popup: + cv2.imshow(window_name, frame) + if cv2.waitKey(1) & 0xFF == 27: + break + else: + cv2.imshow(window_name, frame) + if cv2.waitKey(1) & 0xFF == 27: + break + frame_count += 1 + cap.release() + cv2.destroyAllWindows() + +# --- Main Entrypoint --- +if __name__ == "__main__": + # Choose model: yolo11x or yolo11n, etc. 
+ MODEL_NAME = "yolo11x" + DEVICE = "AUTO" # or "CPU", "GPU" + # Step 1: Convert model if needed + ov_xml = convert_yolo_to_openvino(MODEL_NAME) + # Step 2: Quantize (optional, demo skips actual quantization) + ov_xml = quantize_openvino_model(ov_xml, MODEL_NAME) + # Step 3: Create detector + detector = OpenVINOYOLODetector(ov_xml, device=DEVICE) + # Step 4: Run on webcam, video, or image + # Webcam: source=0, Video: source="video.mp4", Image: source="image.jpg" + run_inference(detector, source=0, conf_threshold=0.25, flip=True, use_popup=True, video_width=1280) +# To run on a video file: run_inference(detector, source="people.mp4", conf_threshold=0.25) +# To run on an image: run_inference(detector, source="image.jpg", conf_threshold=0.25) +# To run async or batch, extend the OpenVINOYOLODetector class with async API as needed. + +import numpy as np +import cv2 + +def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, input_shape=(640, 640), original_shape=None): + """ + output: OpenVINO raw output tensor (e.g., shape [1, 25200, 85]) + conf_threshold: minimum confidence + iou_threshold: for NMS + input_shape: model input size (w, h) + original_shape: original image size (w, h) + """ + # 1. Squeeze batch dimension + output = np.squeeze(output) # [25200, 85] + + # 2. Split predictions + boxes = output[:, :4] + obj_conf = output[:, 4] + class_scores = output[:, 5:] + + # 3. Get class with highest score + class_ids = np.argmax(class_scores, axis=1) + class_conf = class_scores[np.arange(len(class_scores)), class_ids] + + # 4. Multiply objectness confidence with class confidence + scores = obj_conf * class_conf + + # 5. Filter by confidence threshold + mask = scores > conf_threshold + boxes = boxes[mask] + scores = scores[mask] + class_ids = class_ids[mask] + + if original_shape is not None: + # Rescale boxes from input_shape to original image shape + input_w, input_h = input_shape + orig_w, orig_h = original_shape + scale_x = orig_w / input_w + scale_y = orig_h / input_h + + boxes[:, 0] *= scale_x # x1 + boxes[:, 1] *= scale_y # y1 + boxes[:, 2] *= scale_x # x2 + boxes[:, 3] *= scale_y # y2 + + # 6. Convert boxes to [x, y, w, h] format for OpenCV NMS + boxes_xywh = [] + for box in boxes: + x1, y1, x2, y2 = box + boxes_xywh.append([x1, y1, x2 - x1, y2 - y1]) + + # 7. Apply NMS + indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold) + + # 8. 
Return filtered boxes + result_boxes = [] + result_scores = [] + result_classes = [] + if len(boxes) > 0 and len(scores) > 0: + indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold) + if len(indices) > 0: + indices = np.array(indices).flatten() + for i in indices: + i = int(i) + result_boxes.append(boxes[i]) + result_scores.append(scores[i]) + result_classes.append(class_ids[i]) + return result_boxes, result_scores, result_classes + +import os +import time +import numpy as np +import cv2 +from pathlib import Path +from typing import List, Dict, Optional + +# Only traffic-related classes for detection +TRAFFIC_CLASS_NAMES = [ + 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', + 'traffic light', 'stop sign', 'parking meter' +] + +class OpenVINOVehicleDetector: + def __init__(self, model_path: str = None, device: str = "AUTO", use_quantized: bool = False, enable_ocr: bool = False, confidence_threshold: float = 0.4): + import openvino as ov + self.device = device + self.confidence_threshold = confidence_threshold + self.ocr_reader = None + self.class_names = TRAFFIC_CLASS_NAMES + self.performance_stats = { + 'fps': 0, + 'avg_inference_time': 0, + 'frames_processed': 0, + 'backend': f"OpenVINO-{device}", + 'total_detections': 0, + 'detection_rate': 0 + } + self._inference_times = [] + self._start_time = time.time() + self._frame_count = 0 + # Model selection logic + self.model_path = self._find_best_model(model_path, use_quantized) + self.core = ov.Core() + self.model = self.core.read_model(self.model_path) + # Always reshape to static shape before accessing .shape + self.model.reshape({0: [1, 3, 640, 640]}) + self.input_shape = self.model.inputs[0].shape + self.input_height = self.input_shape[2] + self.input_width = self.input_shape[3] + self.ov_config = {} + if device != "CPU": + # Already reshaped above, so nothing more needed here + pass + if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices): + self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"} + self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config) + + self.output_layer = self.compiled_model.output(0) + + def _find_best_model(self, model_path, use_quantized): + # Priority: quantized IR > IR > .pt + search_paths = [ + Path(model_path) if model_path else None, + Path("yolo11x_openvino_int8_model/yolo11x.xml") if use_quantized else None, + Path("yolo11x_openvino_model/yolo11x.xml"), + Path("rcb/yolo11x_openvino_model/yolo11x.xml"), + Path("yolo11x.xml"), + Path("rcb/yolo11x.xml"), + Path("yolo11x.pt"), + Path("rcb/yolo11x.pt") + ] + for p in search_paths: + if p and p.exists(): + return str(p) + raise FileNotFoundError("No suitable YOLOv11x model found for OpenVINO.") + + def detect_vehicles(self, frame: np.ndarray, conf_threshold: float = None) -> List[Dict]: + if conf_threshold is None: + conf_threshold = 0.1 # Lowered for debugging + start = time.time() + input_tensor = self._preprocess(frame) + output = self.compiled_model([input_tensor])[self.output_layer] + # Debug: print raw output shape + print(f"[DEBUG] Model output shape: {output.shape}") + detections = self._postprocess(output, frame.shape, conf_threshold) + print(f"[DEBUG] Detections after postprocess: {len(detections)}") + elapsed = time.time() - start + self._inference_times.append(elapsed) + self._frame_count += 1 + self.performance_stats['frames_processed'] = self._frame_count + self.performance_stats['total_detections'] += len(detections) 
+ if len(self._inference_times) > 100: + self._inference_times.pop(0) + self.performance_stats['avg_inference_time'] = float(np.mean(self._inference_times)) if self._inference_times else 0 + total_time = time.time() - self._start_time + self.performance_stats['fps'] = self._frame_count / total_time if total_time > 0 else 0 + return detections + + def _preprocess(self, frame: np.ndarray) -> np.ndarray: + img = cv2.resize(frame, (self.input_width, self.input_height)) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = img.astype(np.float32) / 255.0 + img = img.transpose(2, 0, 1)[None] + return img + + def _postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]: + # Output: (1, 84, 8400) or (84, 8400) or (8400, 84) + if output.ndim == 3: + output = np.squeeze(output) + if output.shape[0] == 84: + output = output.T # (8400, 84) + boxes = output[:, :4] + scores = output[:, 4:] + class_ids = np.argmax(scores, axis=1) + confidences = np.max(scores, axis=1) + detections = [] + h, w = frame_shape[:2] + for i, (box, score, class_id) in enumerate(zip(boxes, confidences, class_ids)): + if score < conf_threshold: + continue + x_c, y_c, bw, bh = box + # If normalized, scale to input size + if all(0.0 <= v <= 1.0 for v in box): + x_c *= self.input_width + y_c *= self.input_height + bw *= self.input_width + bh *= self.input_height + # Scale to original frame size + scale_x = w / self.input_width + scale_y = h / self.input_height + x_c *= scale_x + y_c *= scale_y + bw *= scale_x + bh *= scale_y + x1 = int(round(x_c - bw / 2)) + y1 = int(round(y_c - bh / 2)) + x2 = int(round(x_c + bw / 2)) + y2 = int(round(y_c + bh / 2)) + x1 = max(0, min(x1, w - 1)) + y1 = max(0, min(y1, h - 1)) + x2 = max(0, min(x2, w - 1)) + y2 = max(0, min(y2, h - 1)) + if x2 <= x1 or y2 <= y1: + continue + # Only keep class 9 as traffic light, rename if found + if class_id == 9: + class_name = "traffic light" + elif class_id < len(TRAFFIC_CLASS_NAMES): + class_name = TRAFFIC_CLASS_NAMES[class_id] + else: + continue # Remove unknown/other classes + detections.append({ + 'bbox': [x1, y1, x2, y2], + 'confidence': float(score), + 'class_id': int(class_id), + 'class_name': class_name + }) + print(f"[DEBUG] Raw detections before NMS: {len(detections)}") + # Apply NMS + if len(detections) > 0: + boxes = np.array([det['bbox'] for det in detections]) + scores = np.array([det['confidence'] for det in detections]) + indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), conf_threshold, 0.5) + if isinstance(indices, (list, tuple)) and len(indices) > 0: + indices = np.array(indices).flatten() + elif isinstance(indices, np.ndarray) and indices.size > 0: + indices = indices.flatten() + else: + indices = [] + detections = [detections[int(i)] for i in indices] if len(indices) > 0 else [] + print(f"[DEBUG] Detections after NMS: {len(detections)}") + return detections + + def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray: + # 80+ visually distinct colors for COCO classes (BGR) + COCO_COLORS = [ + (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49), + (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187), + (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236), + (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199), + (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), (207, 255, 49), + (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187), + 
(44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236), + (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199), + (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49), + (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187), + (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236), + (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), + (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), + (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), + (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), + (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), + (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), + (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49) + ] + for det in detections: + x1, y1, x2, y2 = det['bbox'] + label = f"{det['class_name']} {det['confidence']:.2f}" + color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)] + cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness) + cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) + return frame + +# --- Video/Image/Live Inference --- +def run_inference(detector: OpenVINOYOLODetector, source=0, conf_threshold=0.25, flip=False, use_popup=False, video_width=None): + if isinstance(source, str) and not os.path.exists(source): + print(f"Downloading sample video: {source}") + import requests + url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4" + r = requests.get(url) + with open(source, 'wb') as f: + f.write(r.content) + cap = cv2.VideoCapture(source) + if not cap.isOpened(): + print(f"Failed to open video source: {source}") + return + window_name = "YOLOv11x + OpenVINO Detection" + if use_popup: + cv2.namedWindow(window_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE) + frame_count = 0 + times = [] + while True: + ret, frame = cap.read() + if not ret: + break + if flip: + frame = cv2.flip(frame, 1) + if video_width: + scale = video_width / max(frame.shape[:2]) + frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA) + start = time.time() + detections = detector.infer(frame, conf_threshold=conf_threshold) + frame = detector.draw(frame, detections) + elapsed = time.time() - start + times.append(elapsed) + if len(times) > 200: + times.pop(0) + fps = 1.0 / np.mean(times) if times else 0 + cv2.putText(frame, f"FPS: {fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2) + if use_popup: + cv2.imshow(window_name, frame) + if cv2.waitKey(1) & 0xFF == 27: + break + else: + cv2.imshow(window_name, frame) + if cv2.waitKey(1) & 0xFF == 27: + break + frame_count += 1 + cap.release() + cv2.destroyAllWindows() + +# --- Main Entrypoint --- +if __name__ == "__main__": + # Choose model: yolo11x or yolo11n, etc. 
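    # Smaller variants such as yolo11n trade accuracy for speed.
    #
    # A minimal usage sketch for the standalone postprocess_openvino_yolo()
    # defined later in this file, kept as comments; compiled_model, input_tensor,
    # output_layer, orig_w and orig_h are placeholders, not variables of this script:
    #   raw_output = compiled_model([input_tensor])[output_layer]
    #   boxes, scores, class_ids = postprocess_openvino_yolo(
    #       raw_output, conf_threshold=0.4, iou_threshold=0.5,
    #       input_shape=(640, 640), original_shape=(orig_w, orig_h))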
+ MODEL_NAME = "yolo11x" + + DEVICE = "AUTO" # or "CPU", "GPU" + # Step 1: Convert model if needed + ov_xml = convert_yolo_to_openvino(MODEL_NAME) + # Step 2: Quantize (optional, demo skips actual quantization) + ov_xml = quantize_openvino_model(ov_xml, MODEL_NAME) + # Step 3: Create detector + detector = OpenVINOYOLODetector(ov_xml, device=DEVICE) + # Step 4: Run on webcam, video, or image + # Webcam: source=0, Video: source="video.mp4", Image: source="image.jpg" + run_inference(detector, source=0, conf_threshold=0.25, flip=True, use_popup=True, video_width=1280) +# To run on a video file: run_inference(detector, source="people.mp4", conf_threshold=0.25) +# To run on an image: run_inference(detector, source="image.jpg", conf_threshold=0.25) +# To run async or batch, extend the OpenVINOYOLODetector class with async API as needed. + +import numpy as np +import cv2 + +def postprocess_openvino_yolo(output, conf_threshold=0.4, iou_threshold=0.5, input_shape=(640, 640), original_shape=None): + """ + output: OpenVINO raw output tensor (e.g., shape [1, 25200, 85]) + conf_threshold: minimum confidence + iou_threshold: for NMS + input_shape: model input size (w, h) + original_shape: original image size (w, h) + """ + # 1. Squeeze batch dimension + output = np.squeeze(output) # [25200, 85] + + # 2. Split predictions + boxes = output[:, :4] + obj_conf = output[:, 4] + class_scores = output[:, 5:] + + # 3. Get class with highest score + class_ids = np.argmax(class_scores, axis=1) + class_conf = class_scores[np.arange(len(class_scores)), class_ids] + + # 4. Multiply objectness confidence with class confidence + scores = obj_conf * class_conf + + # 5. Filter by confidence threshold + mask = scores > conf_threshold + boxes = boxes[mask] + scores = scores[mask] + class_ids = class_ids[mask] + + if original_shape is not None: + # Rescale boxes from input_shape to original image shape + input_w, input_h = input_shape + orig_w, orig_h = original_shape + scale_x = orig_w / input_w + scale_y = orig_h / input_h + + boxes[:, 0] *= scale_x # x1 + boxes[:, 1] *= scale_y # y1 + boxes[:, 2] *= scale_x # x2 + boxes[:, 3] *= scale_y # y2 + + # 6. Convert boxes to [x, y, w, h] format for OpenCV NMS + boxes_xywh = [] + for box in boxes: + x1, y1, x2, y2 = box + boxes_xywh.append([x1, y1, x2 - x1, y2 - y1]) + + # 7. Apply NMS + indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold) + + # 8. 
Return filtered boxes + result_boxes = [] + result_scores = [] + result_classes = [] + if len(boxes) > 0 and len(scores) > 0: + indices = cv2.dnn.NMSBoxes(boxes_xywh, scores.tolist(), conf_threshold, iou_threshold) + if len(indices) > 0: + indices = np.array(indices).flatten() + for i in indices: + i = int(i) + result_boxes.append(boxes[i]) + result_scores.append(scores[i]) + result_classes.append(class_ids[i]) + return result_boxes, result_scores, result_classes + +import os +import time +import numpy as np +import cv2 +from pathlib import Path +from typing import List, Dict, Optional + +# Only traffic-related classes for detection +TRAFFIC_CLASS_NAMES = [ + 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', + 'traffic light', 'stop sign', 'parking meter' +] + +class OpenVINOVehicleDetector: + def __init__(self, model_path: str = None, device: str = "AUTO", use_quantized: bool = False, enable_ocr: bool = False, confidence_threshold: float = 0.4): + import openvino as ov + self.device = device + self.confidence_threshold = confidence_threshold + self.ocr_reader = None + self.class_names = TRAFFIC_CLASS_NAMES + self.performance_stats = { + 'fps': 0, + 'avg_inference_time': 0, + 'frames_processed': 0, + 'backend': f"OpenVINO-{device}", + 'total_detections': 0, + 'detection_rate': 0 + } + self._inference_times = [] + self._start_time = time.time() + self._frame_count = 0 + # Model selection logic + self.model_path = self._find_best_model(model_path, use_quantized) + self.core = ov.Core() + self.model = self.core.read_model(self.model_path) + # Always reshape to static shape before accessing .shape + self.model.reshape({0: [1, 3, 640, 640]}) + self.input_shape = self.model.inputs[0].shape + self.input_height = self.input_shape[2] + self.input_width = self.input_shape[3] + self.ov_config = {} + if device != "CPU": + # Already reshaped above, so nothing more needed here + pass + if "GPU" in device or ("AUTO" in device and "GPU" in self.core.available_devices): + self.ov_config = {"GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"} + self.compiled_model = self.core.compile_model(model=self.model, device_name=self.device, config=self.ov_config) + + self.output_layer = self.compiled_model.output(0) + + def _find_best_model(self, model_path, use_quantized): + # Priority: quantized IR > IR > .pt + search_paths = [ + Path(model_path) if model_path else None, + Path("yolo11x_openvino_int8_model/yolo11x.xml") if use_quantized else None, + Path("yolo11x_openvino_model/yolo11x.xml"), + Path("rcb/yolo11x_openvino_model/yolo11x.xml"), + Path("yolo11x.xml"), + Path("rcb/yolo11x.xml"), + Path("yolo11x.pt"), + Path("rcb/yolo11x.pt") + ] + for p in search_paths: + if p and p.exists(): + return str(p) + raise FileNotFoundError("No suitable YOLOv11x model found for OpenVINO.") + + def detect_vehicles(self, frame: np.ndarray, conf_threshold: float = None) -> List[Dict]: + if conf_threshold is None: + conf_threshold = 0.1 # Lowered for debugging + start = time.time() + input_tensor = self._preprocess(frame) + output = self.compiled_model([input_tensor])[self.output_layer] + # Debug: print raw output shape + print(f"[DEBUG] Model output shape: {output.shape}") + detections = self._postprocess(output, frame.shape, conf_threshold) + print(f"[DEBUG] Detections after postprocess: {len(detections)}") + elapsed = time.time() - start + self._inference_times.append(elapsed) + self._frame_count += 1 + self.performance_stats['frames_processed'] = self._frame_count + self.performance_stats['total_detections'] += len(detections) 
+ if len(self._inference_times) > 100: + self._inference_times.pop(0) + self.performance_stats['avg_inference_time'] = float(np.mean(self._inference_times)) if self._inference_times else 0 + total_time = time.time() - self._start_time + self.performance_stats['fps'] = self._frame_count / total_time if total_time > 0 else 0 + return detections + + def _preprocess(self, frame: np.ndarray) -> np.ndarray: + img = cv2.resize(frame, (self.input_width, self.input_height)) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = img.astype(np.float32) / 255.0 + img = img.transpose(2, 0, 1)[None] + return img + + def _postprocess(self, output: np.ndarray, frame_shape, conf_threshold: float) -> List[Dict]: + # Output: (1, 84, 8400) or (84, 8400) or (8400, 84) + if output.ndim == 3: + output = np.squeeze(output) + if output.shape[0] == 84: + output = output.T # (8400, 84) + boxes = output[:, :4] + scores = output[:, 4:] + class_ids = np.argmax(scores, axis=1) + confidences = np.max(scores, axis=1) + detections = [] + h, w = frame_shape[:2] + for i, (box, score, class_id) in enumerate(zip(boxes, confidences, class_ids)): + if score < conf_threshold: + continue + x_c, y_c, bw, bh = box + # If normalized, scale to input size + if all(0.0 <= v <= 1.0 for v in box): + x_c *= self.input_width + y_c *= self.input_height + bw *= self.input_width + bh *= self.input_height + # Scale to original frame size + scale_x = w / self.input_width + scale_y = h / self.input_height + x_c *= scale_x + y_c *= scale_y + bw *= scale_x + bh *= scale_y + x1 = int(round(x_c - bw / 2)) + y1 = int(round(y_c - bh / 2)) + x2 = int(round(x_c + bw / 2)) + y2 = int(round(y_c + bh / 2)) + x1 = max(0, min(x1, w - 1)) + y1 = max(0, min(y1, h - 1)) + x2 = max(0, min(x2, w - 1)) + y2 = max(0, min(y2, h - 1)) + if x2 <= x1 or y2 <= y1: + continue + # Only keep class 9 as traffic light, rename if found + if class_id == 9: + class_name = "traffic light" + elif class_id < len(TRAFFIC_CLASS_NAMES): + class_name = TRAFFIC_CLASS_NAMES[class_id] + else: + continue # Remove unknown/other classes + detections.append({ + 'bbox': [x1, y1, x2, y2], + 'confidence': float(score), + 'class_id': int(class_id), + 'class_name': class_name + }) + print(f"[DEBUG] Raw detections before NMS: {len(detections)}") + # Apply NMS + if len(detections) > 0: + boxes = np.array([det['bbox'] for det in detections]) + scores = np.array([det['confidence'] for det in detections]) + indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), conf_threshold, 0.5) + if isinstance(indices, (list, tuple)) and len(indices) > 0: + indices = np.array(indices).flatten() + elif isinstance(indices, np.ndarray) and indices.size > 0: + indices = indices.flatten() + else: + indices = [] + detections = [detections[int(i)] for i in indices] if len(indices) > 0 else [] + print(f"[DEBUG] Detections after NMS: {len(detections)}") + return detections + + def draw(self, frame: np.ndarray, detections: List[Dict], box_thickness: int = 2) -> np.ndarray: + # 80+ visually distinct colors for COCO classes (BGR) + COCO_COLORS = [ + (255, 56, 56), (255, 157, 151), (255, 112, 31), (255, 178, 29), (207, 210, 49), + (72, 249, 10), (146, 204, 23), (61, 219, 134), (26, 147, 52), (0, 212, 187), + (44, 153, 168), (0, 194, 255), (52, 69, 147), (100, 115, 255), (0, 24, 236), + (132, 56, 255), (82, 0, 133), (203, 56, 255), (255, 149, 200), (255, 55, 199), + (255, 255, 56), (255, 255, 151), (255, 255, 31), (255, 255, 29), (207, 255, 49), + (72, 255, 10), (146, 255, 23), (61, 255, 134), (26, 255, 52), (0, 255, 187), + 
(44, 255, 168), (0, 255, 255), (52, 255, 147), (100, 255, 255), (0, 255, 236), + (132, 255, 255), (82, 255, 133), (203, 255, 255), (255, 255, 200), (255, 255, 199), + (56, 255, 255), (157, 255, 151), (112, 255, 31), (178, 255, 29), (210, 255, 49), + (249, 255, 10), (204, 255, 23), (219, 255, 134), (147, 255, 52), (212, 255, 187), + (153, 255, 168), (194, 255, 255), (69, 255, 147), (115, 255, 255), (24, 255, 236), + (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), + (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), + (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), + (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49), + (249, 72, 10), (204, 146, 23), (219, 61, 134), (147, 26, 52), (212, 0, 187), + (153, 44, 168), (194, 0, 255), (69, 52, 147), (115, 100, 255), (24, 0, 236), + (56, 132, 255), (157, 82, 151), (112, 203, 31), (178, 255, 29), (210, 255, 49) + ] + for det in detections: + x1, y1, x2, y2 = det['bbox'] + label = f"{det['class_name']} {det['confidence']:.2f}" + color = COCO_COLORS[det['class_id'] % len(COCO_COLORS)] + cv2.rectangle(frame, (x1, y1), (x2, y2), color, box_thickness) + cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) + return frame \ No newline at end of file diff --git a/qt_app_pyside1/__pycache__/red_light_violation_pipeline.cpython-311.pyc b/qt_app_pyside1/__pycache__/red_light_violation_pipeline.cpython-311.pyc index 26a997fd74f41ac89ab63e5b973ea310ef2d5cb4..2874dc0420921825a63710b9b5727d1c6bf7ab05 100644 GIT binary patch delta 98 zcmX@u!FZ{Ik$X8WFBbz4gs86A$eqaSl;CU?6Iz^FR2-9%T9%xXUz!r*l3JWyl3x(x ymYJ8BlNytplbV>Pn+D?O7L*oe#1xjqCl(aM7gQE!rlcCiOuncsymhxLuHYIWI340}vRgq;KS&&1zujVin_(U!IqfpO{h{Q&MJIqO3v0AzC-ZvX%Q diff --git a/qt_app_pyside1/config.json b/qt_app_pyside1/config.json index c065732..a482cae 100644 --- a/qt_app_pyside1/config.json +++ b/qt_app_pyside1/config.json @@ -4,7 +4,8 @@ "enable_ocr": true, "enable_tracking": true, "model_path": null, - "device": "CPU" + "model": "YOLOv11n", + "device": "GPU" }, "violations": { "red_light_grace_period": 2.0, @@ -29,5 +30,11 @@ "analytics": { "enable_charts": true, "history_length": 1000 + }, + "vlm": { + "backend_url": "http://localhost:8399", + "enable_search": true, + "enable_vqa": true, + "timeout": 30 } } \ No newline at end of file diff --git a/qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc b/qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc index 9af86486837451f055af09be4deeebf30796fbd0..a4456088ac3c38e1afd42fe4670e1ddb2daf7999 100644 GIT binary patch delta 73 zcmZ3%c$ATQIWI340}zC$u9(PeY82sY6%$&VT2vg9l3JFWlV6$=Pn+D?O7L*oeY~m=e1pwq+8y^4w delta 56 zcmZqcX8h2_$i1AGmx}=iEM66D9A~ delta 76 zcmX@`oN@hgM(*Xjyj%=Gu?uP SnCy(ilEhq8!Ogo^+42A;101UW diff --git a/qt_app_pyside1/controllers/__pycache__/model_manager.cpython-311.pyc b/qt_app_pyside1/controllers/__pycache__/model_manager.cpython-311.pyc index 49de43fc2faaab1cd73a0a91e35eb071851200c3..53fe91a9a876fab7bcf4a00726a898a8e9116365 100644 GIT binary patch delta 8215 zcmb_BZEzdcad$X;e;z>)AV7jVeh3sPi6lsUP_$%ce6!(z0m?cO)T#fcgMP zG7B22M|PRSifKPht+e!rNgPLsHL+Z}4ab>IX*;duKbZrIgbX^GsCF}Lr}3B`*AtJE zOuO$600~-_+D?y;d;9k7?)$j+cHb_(y-5Gf2ekS7CX=3m=dIt~7~ekmhWT~crl2Y6 zB}$-%X`UKZ@boalJ8+V2po=lBC{HMPm7p9|@`g*wVFTdCQ3Y=V+{Bv%^BD_KMjmKk z1&vd#00ap$a24>DeENN))I1APR=z;cKtI+?O3!d1UwDZcw(&NAMSL+|B#EZjN=znOZ%=%v*&4E5u~OSJi{yDPr<8r^_+^=z@L`a%`v=QqQff8DS0Dr;>~jk 
diff --git a/qt_app_pyside1/controllers/model_manager.py b/qt_app_pyside1/controllers/model_manager.py
index b78809a..d586a1e 100644
--- a/qt_app_pyside1/controllers/model_manager.py
+++ b/qt_app_pyside1/controllers/model_manager.py
@@ -52,7 +52,8 @@ class ModelManager:
                 "confidence_threshold": 0.3,
                 "enable_ocr": True,
                 "enable_tracking": True,
-                "model_path": None
+                "model_path": None,
+                "device": "GPU"  # Force GPU usage for Intel Arc
             },
             "violations": {
                 "red_light_grace_period": 2.0,
@@ -97,13 +98,27 @@ class ModelManager:
             # Initialize detector
             print(f"✅ Initializing OpenVINO detector with model: {model_path}")
+
+            # Store current model info for stats
+            self.current_model_path = model_path
+            self.current_model_name = self._extract_model_name_from_path(model_path)
+
             device = self.config["detection"].get("device", "AUTO")
-            print(f"✅ Using 
inference device: {device}") + print(f"🔧 Model Manager: Config device setting: {device}") + print(f"🔧 Model Manager: Creating detector with device: {device}") self.detector = OpenVINOVehicleDetector( model_path=model_path, device=device, confidence_threshold=self.config["detection"]["confidence_threshold"] ) + print(f"✅ Detector created with device: {device}") + + # Verify the detector is using the correct device + if hasattr(self.detector, 'device'): + actual_device = self.detector.device + print(f"🔍 Model Manager: Detector reports device as: {actual_device}") + else: + print(f"🔍 Model Manager: Detector device attribute not available") # Use only RedLightViolationPipeline for violation/crosswalk/traffic light logic self.violation_pipeline = RedLightViolationPipeline(debug=True) @@ -128,18 +143,48 @@ class ModelManager: traceback.print_exc() def _find_best_model_path(self, base_model_name: str = None) -> Optional[str]: - + """ + Find the best model path based on configuration. + Now respects the model selection from config panel. + """ if base_model_name is None: - device = self.config["detection"].get("device", "AUTO") - if device == "CPU" or device == "AUTO": - # Use yolo11n for CPU - faster, lighter model - base_model_name = "yolo11n" - print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)") + # First, check if a specific model is selected in config + selected_model = self.config["detection"].get("model", None) + if selected_model and selected_model.lower() != "auto": + base_model_name = selected_model.lower() + # Convert YOLOv11x format to yolo11x format + if 'yolov11' in base_model_name: + base_model_name = base_model_name.replace('yolov11', 'yolo11') + print(f"🎯 Using model selected from config panel: {base_model_name}") else: - # Use yolo11x for GPU - larger model with better accuracy - base_model_name = "yolo11x" - print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)") + # Fallback to device-based selection only if no specific model selected + device = self.config["detection"].get("device", "AUTO") + if device == "CPU" or device == "AUTO": + # Use yolo11n for CPU - faster, lighter model + base_model_name = "yolo11n" + print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)") + else: + # Use yolo11x for GPU - larger model with better accuracy + base_model_name = "yolo11x" + print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)") + + # Ensure we have a clean model name (remove any version suffixes) + if base_model_name: + # Handle different model name formats + if "yolo11" in base_model_name.lower(): + if "11n" in base_model_name.lower(): + base_model_name = "yolo11n" + elif "11x" in base_model_name.lower(): + base_model_name = "yolo11x" + elif "11s" in base_model_name.lower(): + base_model_name = "yolo11s" + elif "11m" in base_model_name.lower(): + base_model_name = "yolo11m" + elif "11l" in base_model_name.lower(): + base_model_name = "yolo11l" + + print(f"🔍 Looking for model: {base_model_name}") # Check if the openvino_models directory exists in the current working directory cwd_openvino_dir = Path.cwd() / "openvino_models" @@ -201,6 +246,55 @@ class ModelManager: print(f"❌ No model found for {base_model_name}") return None + + def _extract_model_name_from_path(self, model_path: str) -> str: + """Extract model name from file path""" + try: + # Convert to lowercase for matching + path_lower = model_path.lower() + print(f"🔍 Extracting model name from path: 
{model_path}") + print(f"🔍 Path lower: {path_lower}") + + # Check for specific models + if 'yolo11n' in path_lower: + extracted_name = 'YOLOv11n' + print(f"✅ Extracted model name: {extracted_name}") + return extracted_name + elif 'yolo11s' in path_lower: + extracted_name = 'YOLOv11s' + print(f"✅ Extracted model name: {extracted_name}") + return extracted_name + elif 'yolo11m' in path_lower: + extracted_name = 'YOLOv11m' + print(f"✅ Extracted model name: {extracted_name}") + return extracted_name + elif 'yolo11l' in path_lower: + extracted_name = 'YOLOv11l' + print(f"✅ Extracted model name: {extracted_name}") + return extracted_name + elif 'yolo11x' in path_lower: + extracted_name = 'YOLOv11x' + print(f"✅ Extracted model name: {extracted_name}") + return extracted_name + elif 'yolo11' in path_lower: + extracted_name = 'YOLOv11' + print(f"✅ Extracted model name: {extracted_name}") + return extracted_name + else: + extracted_name = 'YOLO' + print(f"⚠️ Fallback model name: {extracted_name}") + return extracted_name + except Exception as e: + print(f"⚠️ Error extracting model name: {e}") + return 'Unknown' + + def get_current_model_info(self) -> dict: + """Get current model information for stats""" + return { + 'model_path': getattr(self, 'current_model_path', None), + 'model_name': getattr(self, 'current_model_name', 'Unknown'), + 'device': self.detector.get_device() if self.detector else 'Unknown' + } def detect(self, frame: np.ndarray) -> List[Dict]: """ @@ -392,8 +486,9 @@ class ModelManager: if not new_config: return - # Store old device setting to check if it changed + # Store old settings to check if they changed old_device = self.config["detection"].get("device", "AUTO") if "detection" in self.config else "AUTO" + old_model = self.config["detection"].get("model", "auto") if "detection" in self.config else "auto" # Update configuration for section in new_config: @@ -402,21 +497,46 @@ class ModelManager: else: self.config[section] = new_config[section] - # Check if device changed - if so, we need to reinitialize models + # Check if device or model changed - if so, we need to reinitialize models new_device = self.config["detection"].get("device", "AUTO") + new_model = self.config["detection"].get("model", "auto") device_changed = old_device != new_device + model_changed = old_model != new_model - if device_changed: - print(f"📢 Device changed from {old_device} to {new_device}, reinitializing models...") - # Reinitialize models with new device - self._initialize_models() + if device_changed or model_changed: + print(f"📢 Configuration changed:") + if device_changed: + print(f" Device: {old_device} → {new_device}") + if model_changed: + print(f" Model: {old_model} → {new_model}") + print(f" Reinitializing models...") + + # Force complete reinitialization - let the model path extraction handle the naming + self.force_model_reload() return - # Just update detector confidence threshold if device didn't change + # Just update detector confidence threshold if device and model didn't change if self.detector: conf_thres = self.config["detection"].get("confidence_threshold", 0.5) self.detector.conf_thres = conf_thres + def force_model_reload(self): + """Force complete model reload with current config""" + print("🔄 Force reloading models with current configuration...") + + # Get the configured model selection + selected_model = self.config["detection"].get("model", "auto") + print(f"🎯 Force reload: Config model selection = {selected_model}") + + # Clear current models + self.detector = None + 
self.violation_pipeline = None + + # Reinitialize with current config - let _initialize_models handle the naming + self._initialize_models() + + print("✅ Models reloaded successfully") + def _bbox_iou(self, boxA, boxB): # Compute the intersection over union of two boxes xA = max(boxA[0], boxB[0]) diff --git a/qt_app_pyside1/controllers/video_controller.py b/qt_app_pyside1/controllers/video_controller.py index b6de05f..89dbfe3 100644 --- a/qt_app_pyside1/controllers/video_controller.py +++ b/qt_app_pyside1/controllers/video_controller.py @@ -73,6 +73,15 @@ class VideoController(QObject): # Debug counter for monitoring frame processing self.debug_counter = 0 + + def on_model_switched(self, device): + """Handle device switch notification from model manager.""" + print(f"[VIDEO CONTROLLER] Device switched to: {device}") + # Update model manager config if needed + if self.model_manager and hasattr(self.model_manager, 'config'): + self.model_manager.config["detection"]["device"] = device + print(f"[VIDEO CONTROLLER] Updated model manager device to: {device}") + def set_source(self, source): """Set video source (file path, camera index, or URL)""" print(f"DEBUG: VideoController.set_source called with: {source} (type: {type(source)})") diff --git a/qt_app_pyside1/controllers/video_controller_new.py b/qt_app_pyside1/controllers/video_controller_new.py index 794059b..2e762f5 100644 --- a/qt_app_pyside1/controllers/video_controller_new.py +++ b/qt_app_pyside1/controllers/video_controller_new.py @@ -1,4 +1,1958 @@ -from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer +# from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer, Slot +# from PySide6.QtGui import QImage, QPixmap +# import cv2 +# import time +# import numpy as np +# from datetime import datetime +# from collections import deque +# from typing import Dict, List, Optional +# import os +# import sys +# import math + +# # Add parent directory to path for imports +# sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +# # Import utilities +# from utils.annotation_utils import ( +# draw_detections, +# draw_performance_metrics, +# resize_frame_for_display, +# convert_cv_to_qimage, +# convert_cv_to_pixmap, +# pipeline_with_violation_line +# ) + +# # Import enhanced annotation utilities +# from utils.enhanced_annotation_utils import ( +# enhanced_draw_detections, +# draw_performance_overlay, +# enhanced_cv_to_qimage, +# enhanced_cv_to_pixmap +# ) + +# # Import traffic light color detection utilities +# from red_light_violation_pipeline import RedLightViolationPipeline +# from utils.traffic_light_utils import detect_traffic_light_color, draw_traffic_light_status, ensure_traffic_light_color +# from utils.crosswalk_utils2 import detect_crosswalk_and_violation_line, draw_violation_line, get_violation_line_y +# from controllers.bytetrack_tracker import ByteTrackVehicleTracker +# TRAFFIC_LIGHT_CLASSES = ["traffic light", "trafficlight", "tl"] +# TRAFFIC_LIGHT_NAMES = ['trafficlight', 'traffic light', 'tl', 'signal'] + +# def normalize_class_name(class_name): +# """Normalizes class names from different models/formats to a standard name""" +# if not class_name: +# return "" + +# name_lower = class_name.lower() + +# # Traffic light variants +# if name_lower in ['traffic light', 'trafficlight', 'traffic_light', 'tl', 'signal']: +# return 'traffic light' + +# # Keep specific vehicle classes (car, truck, bus) separate +# # Just normalize naming variations within each 
class +# if name_lower in ['car', 'auto', 'automobile']: +# return 'car' +# elif name_lower in ['truck']: +# return 'truck' +# elif name_lower in ['bus']: +# return 'bus' +# elif name_lower in ['motorcycle', 'scooter', 'motorbike', 'bike']: +# return 'motorcycle' + +# # Person variants +# if name_lower in ['person', 'pedestrian', 'human']: +# return 'person' + +# # Other common classes can be added here + +# return class_name + +# def is_traffic_light(class_name): +# """Helper function to check if a class name is a traffic light with normalization""" +# if not class_name: +# return False +# normalized = normalize_class_name(class_name) +# return normalized == 'traffic light' + +# class VideoController(QObject): +# frame_ready = Signal(object, object, dict) # QPixmap, detections, metrics +# raw_frame_ready = Signal(np.ndarray, list, float) # frame, detections, fps +# frame_np_ready = Signal(np.ndarray) # Direct NumPy frame signal for display +# stats_ready = Signal(dict) # Dictionary with stats (fps, detection_time, traffic_light) +# violation_detected = Signal(dict) # Signal emitted when a violation is detected +# progress_ready = Signal(int, int, float) # value, max_value, timestamp +# device_info_ready = Signal(dict) # Signal to emit device info to the UI +# auto_select_model_device = Signal() # Signal for UI to request auto model/device selection +# performance_stats_ready = Signal(dict) # NEW: Signal for performance tab (fps, inference, device, res) +# violations_batch_ready = Signal(list) # NEW: Signal to emit a batch of violations +# pause_state_changed = Signal(bool) # Signal emitted when pause state changes (True=paused, False=playing) + +# def __init__(self, model_manager=None): +# """ +# Initialize video controller. + +# Args: +# model_manager: Model manager instance for detection and violation +# """ +# super().__init__() +# print("Loaded advanced VideoController from video_controller_new.py") # DEBUG: Confirm correct controller + +# self._running = False +# self._paused = False # Add pause state +# self._last_frame = None # Store last frame for VLM analysis during pause +# self._last_analysis_data = {} # Store last analysis data for VLM +# self.source = None +# self.source_type = None +# self.source_fps = 0 +# self.performance_metrics = {} +# self.mutex = QMutex() +# self.pause_condition = QWaitCondition() # Add wait condition for pause + +# # Performance tracking +# self.processing_times = deque(maxlen=100) # Store last 100 processing times +# self.fps_history = deque(maxlen=100) # Store last 100 FPS values +# self.start_time = time.time() +# self.frame_count = 0 +# self.actual_fps = 0.0 + +# self.model_manager = model_manager +# self.inference_model = None +# self.tracker = None + +# # Initialize device tracking +# if self.model_manager and hasattr(self.model_manager, 'config'): +# self.current_device = self.model_manager.config.get("detection", {}).get("device", "CPU") +# else: +# self.current_device = "CPU" +# print(f"🔧 Video Controller: Initialized with device: {self.current_device}") + +# self.current_frame = None +# self.current_detections = [] + +# # Traffic light state tracking +# self.latest_traffic_light = {"color": "unknown", "confidence": 0.0} + +# # Vehicle tracking settings +# self.vehicle_history = {} # Dictionary to store vehicle position history +# self.vehicle_statuses = {} # Track stable movement status +# self.movement_threshold = 1.5 # ADJUSTED: More balanced movement detection (was 0.8) +# self.min_confidence_threshold = 0.3 # FIXED: Lower threshold for 
better detection (was 0.5) + +# # Enhanced violation detection settings +# self.position_history_size = 20 # Increased from 10 to track longer history +# self.crossing_check_window = 8 # Check for crossings over the last 8 frames instead of just 2 +# self.max_position_jump = 50 # Maximum allowed position jump between frames (detect ID switches) + +# # Set up violation detection +# try: +# from controllers.red_light_violation_detector import RedLightViolationDetector +# self.violation_detector = RedLightViolationDetector() +# print("✅ Red light violation detector initialized") +# except Exception as e: +# self.violation_detector = None +# print(f"❌ Could not initialize violation detector: {e}") + +# # Import crosswalk detection +# try: +# self.detect_crosswalk_and_violation_line = detect_crosswalk_and_violation_line +# # self.draw_violation_line = draw_violation_line +# print("✅ Crosswalk detection utilities imported") +# except Exception as e: +# print(f"❌ Could not import crosswalk detection: {e}") +# self.detect_crosswalk_and_violation_line = lambda frame, *args: (None, None, {}) +# # self.draw_violation_line = lambda frame, *args, **kwargs: frame + +# # Configure thread +# self.thread = QThread() +# self.moveToThread(self.thread) +# self.thread.started.connect(self._run) +# # Performance measurement +# self.mutex = QMutex() +# self.condition = QWaitCondition() +# self.performance_metrics = { +# 'FPS': 0.0, +# 'Detection (ms)': 0.0, +# 'Total (ms)': 0.0 +# } + +# # Setup render timer with more aggressive settings for UI updates +# self.render_timer = QTimer() +# self.render_timer.timeout.connect(self._process_frame) + +# # Frame buffer +# self.current_frame = None +# self.current_detections = [] +# self.current_violations = [] + +# # Debug counter for monitoring frame processing +# self.debug_counter = 0 +# self.violation_frame_counter = 0 # Add counter for violation processing + +# # Initialize the traffic light color detection pipeline +# self.cv_violation_pipeline = RedLightViolationPipeline(debug=True) + +# # Initialize vehicle tracker +# self.vehicle_tracker = ByteTrackVehicleTracker() + +# # Add red light violation system +# # self.red_light_violation_system = RedLightViolationSystem() + +# # Initialize scene analytics adapter +# try: +# from utils.scene_analytics import SceneAnalyticsAdapter +# self.scene_analytics = SceneAnalyticsAdapter(camera_id="desktop_main") +# self.scene_analytics.object_detected.connect(self._on_scene_object_detected) +# self.scene_analytics.scene_analytics_updated.connect(self._on_scene_analytics_updated) +# self.scene_analytics.roi_event_detected.connect(self._on_roi_event_detected) +# print("✅ Scene analytics adapter initialized") +# except Exception as e: +# self.scene_analytics = None +# print(f"❌ Could not initialize scene analytics: {e}") + +# def refresh_model_info(self): +# """Force refresh of model information for performance display""" +# if hasattr(self, 'model_manager') and self.model_manager: +# print("🔄 Refreshing model information in video controller") +# # The model info will be refreshed in the next stats update +# # Force current device update from config +# if hasattr(self.model_manager, 'config') and 'detection' in self.model_manager.config: +# self.current_device = self.model_manager.config['detection'].get('device', 'CPU') +# print(f"🔄 Updated current device to: {self.current_device}") + +# def set_source(self, source): +# """ +# Set video source (file path, camera index, or URL) + +# Args: +# source: Video source - can be a camera 
index (int), file path (str), +# or URL (str). If None, defaults to camera 0. + +# Returns: +# bool: True if source was set successfully, False otherwise +# """ +# print(f"🎬 VideoController.set_source called with: {source} (type: {type(source)})") + +# # Store current state +# was_running = self._running + +# # Stop current processing if running +# if self._running: +# print("⏹️ Stopping current video processing") +# self.stop() + +# try: +# # Handle source based on type with better error messages +# if source is None: +# print("⚠️ Received None source, defaulting to camera 0") +# self.source = 0 +# self.source_type = "camera" + +# elif isinstance(source, str) and source.strip(): +# if os.path.exists(source): +# # Valid file path +# self.source = source +# self.source_type = "file" +# print(f"📄 Source set to file: {self.source}") +# elif source.lower().startswith(("http://", "https://", "rtsp://", "rtmp://")): +# # URL stream +# self.source = source +# self.source_type = "url" +# print(f"🌐 Source set to URL stream: {self.source}") +# elif source.isdigit(): +# # String camera index (convert to int) +# self.source = int(source) +# self.source_type = "camera" +# print(f"📹 Source set to camera index: {self.source}") +# else: +# # Try as device path or special string +# self.source = source +# self.source_type = "device" +# print(f"📱 Source set to device path: {self.source}") + +# elif isinstance(source, int): +# # Camera index +# self.source = source +# self.source_type = "camera" +# print(f"📹 Source set to camera index: {self.source}") + +# else: +# # Unrecognized - default to camera 0 with warning +# print(f"⚠️ Unrecognized source type: {type(source)}, defaulting to camera 0") +# self.source = 0 +# self.source_type = "camera" +# except Exception as e: +# print(f"❌ Error setting source: {e}") +# self.source = 0 +# self.source_type = "camera" +# return False + +# # Get properties of the source (fps, dimensions, etc) +# print(f"🔍 Getting properties for source: {self.source}") +# success = self._get_source_properties() + +# if success: +# print(f"✅ Successfully configured source: {self.source} ({self.source_type})") + +# # Reset ByteTrack tracker for new source to ensure IDs start from 1 +# if hasattr(self, 'vehicle_tracker') and self.vehicle_tracker is not None: +# try: +# print("🔄 Resetting vehicle tracker for new source") +# self.vehicle_tracker.reset() +# except Exception as e: +# print(f"⚠️ Could not reset vehicle tracker: {e}") + +# # Emit successful source change +# self.stats_ready.emit({ +# 'source_changed': True, +# 'source_type': self.source_type, +# 'fps': self.source_fps if hasattr(self, 'source_fps') else 0, +# 'dimensions': f"{self.frame_width}x{self.frame_height}" if hasattr(self, 'frame_width') else "unknown" +# }) + +# # Restart if previously running +# if was_running: +# print("▶️ Restarting video processing with new source") +# self.start() +# else: +# print(f"❌ Failed to configure source: {self.source}") +# # Notify UI about the error +# self.stats_ready.emit({ +# 'source_changed': False, +# 'error': f"Invalid video source: {self.source}", +# 'source_type': self.source_type, +# 'fps': 0, +# 'detection_time_ms': "0", +# 'traffic_light_color': {"color": "unknown", "confidence": 0.0} +# }) + +# return False + +# # Return success status +# return success + +# def _get_source_properties(self): + +# try: +# print(f"🔍 Opening video source for properties check: {self.source}") +# cap = cv2.VideoCapture(self.source) + + +# if not cap.isOpened(): +# print(f"❌ Failed to open video 
source: {self.source}") +# return False + + +# self.source_fps = cap.get(cv2.CAP_PROP_FPS) + + +# self.frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) +# self.frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) +# self.frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + +# ret, test_frame = cap.read() +# if not ret or test_frame is None: +# print("⚠️ Could not read test frame from source") + +# if self.source_type == "camera": +# print("🔄 Retrying camera initialization...") +# time.sleep(1.0) +# ret, test_frame = cap.read() +# if not ret or test_frame is None: +# print("❌ Camera initialization failed after retry") +# cap.release() +# return False +# else: +# print("❌ Could not read frames from video source") +# cap.release() +# return False + +# # Release the capture +# cap.release() + +# print(f"✅ Video source properties: {self.frame_width}x{self.frame_height}, {self.source_fps} FPS") +# return True + +# except Exception as e: +# print(f"❌ Error getting source properties: {e}") +# return False +# return False + +# def start(self): +# """Start video processing""" +# if not self._running: +# self._running = True +# self.start_time = time.time() +# self.frame_count = 0 +# self.debug_counter = 0 +# print("DEBUG: Starting video processing thread") + +# # Reset notification flags for new session +# if hasattr(self, '_no_traffic_light_notified'): +# delattr(self, '_no_traffic_light_notified') + +# # Reset ByteTrack tracker to ensure IDs start from 1 +# if hasattr(self, 'vehicle_tracker') and self.vehicle_tracker is not None: +# try: +# print("🔄 Resetting vehicle tracker for new session") +# self.vehicle_tracker.reset() +# except Exception as e: +# print(f"⚠️ Could not reset vehicle tracker: {e}") + +# # Start the processing thread - add more detailed debugging +# if not self.thread.isRunning(): +# print("🚀 Thread not running, starting now...") +# try: +# self.thread.start() +# print("✅ Thread started successfully") +# print(f"🔄 Thread state: running={self.thread.isRunning()}, finished={self.thread.isFinished()}") +# except Exception as e: +# print(f"❌ Failed to start thread: {e}") +# import traceback +# traceback.print_exc() +# else: +# print("⚠️ Thread is already running!") +# print(f"🔄 Thread state: running={self.thread.isRunning()}, finished={self.thread.isFinished()}") + +# # Start the render timer with a very aggressive interval (10ms = 100fps) +# # This ensures we can process frames as quickly as possible +# print("⏱️ Starting render timer...") +# self.render_timer.start(10) +# print("✅ Render timer started at 100Hz") + +# def stop(self): +# """Stop video processing""" +# if self._running: +# print("DEBUG: Stopping video processing") +# self._running = False + +# # If paused, wake up the thread so it can exit +# self.mutex.lock() +# self._paused = False +# self.pause_condition.wakeAll() +# self.mutex.unlock() + +# self.render_timer.stop() +# # Properly terminate the thread +# if self.thread.isRunning(): +# self.thread.quit() +# if not self.thread.wait(3000): # Wait 3 seconds max +# self.thread.terminate() +# print("WARNING: Thread termination forced") +# # Clear the current frame +# self.mutex.lock() +# self.current_frame = None +# self.mutex.unlock() +# print("DEBUG: Video processing stopped") + +# def __del__(self): +# print("[VideoController] __del__ called. 
Cleaning up thread and timer.") +# self.stop() +# if self.thread.isRunning(): +# self.thread.quit() +# self.thread.wait(1000) +# self.render_timer.stop() + +# def capture_snapshot(self) -> np.ndarray: +# """Capture current frame""" +# if self.current_frame is not None: +# return self.current_frame.copy() +# return None + +# def _run(self): +# """Main processing loop (runs in thread)""" +# try: +# # Print the source we're trying to open +# print(f"DEBUG: Opening video source: {self.source} (type: {type(self.source)})") + +# cap = None # Initialize capture variable + +# # Try to open source with more robust error handling +# max_retries = 3 +# retry_delay = 1.0 # seconds + +# # Function to attempt opening the source with multiple retries +# def try_open_source(src, retries=max_retries, delay=retry_delay): +# for attempt in range(1, retries + 1): +# print(f"🎥 Opening source (attempt {attempt}/{retries}): {src}") +# try: +# capture = cv2.VideoCapture(src) +# if capture.isOpened(): +# # Try to read a test frame to confirm it's working +# ret, test_frame = capture.read() +# if ret and test_frame is not None: +# print(f"✅ Source opened successfully: {src}") +# # Reset capture position for file sources +# if isinstance(src, str) and os.path.exists(src): +# capture.set(cv2.CAP_PROP_POS_FRAMES, 0) +# return capture +# else: +# print(f"⚠️ Source opened but couldn't read frame: {src}") +# capture.release() +# else: +# print(f"⚠️ Failed to open source: {src}") + +# # Retry after delay +# if attempt < retries: +# print(f"Retrying in {delay:.1f} seconds...") +# time.sleep(delay) +# except Exception as e: +# print(f"❌ Error opening source {src}: {e}") +# if attempt < retries: +# print(f"Retrying in {delay:.1f} seconds...") +# time.sleep(delay) + +# print(f"❌ Failed to open source after {retries} attempts: {src}") +# return None + +# # Handle different source types +# if isinstance(self.source, str) and os.path.exists(self.source): +# # It's a valid file path +# print(f"📄 Opening video file: {self.source}") +# cap = try_open_source(self.source) + +# elif isinstance(self.source, int) or (isinstance(self.source, str) and self.source.isdigit()): +# # It's a camera index +# camera_idx = int(self.source) if isinstance(self.source, str) else self.source +# print(f"📹 Opening camera with index: {camera_idx}") + +# # For cameras, try with different backend options if it fails +# cap = try_open_source(camera_idx) + +# # If failed, try with DirectShow backend on Windows +# if cap is None and os.name == 'nt': +# print("🔄 Trying camera with DirectShow backend...") +# cap = try_open_source(camera_idx + cv2.CAP_DSHOW) + +# else: +# # Try as a string source (URL or device path) +# print(f"🌐 Opening source as string: {self.source}") +# cap = try_open_source(str(self.source)) + +# # Check if we successfully opened the source +# if cap is None: +# print(f"❌ Failed to open video source after all attempts: {self.source}") +# # Notify UI about the error +# self.stats_ready.emit({ +# 'error': f"Could not open video source: {self.source}", +# 'fps': "0", +# 'detection_time_ms': "0", +# 'traffic_light_color': {"color": "unknown", "confidence": 0.0} +# }) +# return + +# # Check again to ensure capture is valid +# if not cap or not cap.isOpened(): +# print(f"ERROR: Could not open video source {self.source}") +# # Emit a signal to notify UI about the error +# self.stats_ready.emit({ +# 'error': f"Failed to open video source: {self.source}", +# 'fps': "0", +# 'detection_time_ms': "0", +# 'traffic_light_color': {"color": "unknown", 
"confidence": 0.0} +# }) +# return + +# # Configure frame timing based on source FPS +# frame_time = 1.0 / self.source_fps if self.source_fps > 0 else 0.033 +# prev_time = time.time() + +# # Log successful opening +# print(f"SUCCESS: Video source opened: {self.source}") +# print(f"Source info - FPS: {self.source_fps}, Size: {self.frame_width}x{self.frame_height}") +# # Main processing loop +# frame_error_count = 0 +# max_consecutive_errors = 10 + +# # --- Violation Rule Functions --- +# def point_in_polygon(point, polygon): +# # Simple point-in-rect for now; replace with polygon logic if needed +# x, y = point +# x1, y1, w, h = polygon +# return x1 <= x <= x1 + w and y1 <= y <= y1 + h + +# def calculate_speed(track, history_dict): +# # Use last two positions for speed +# hist = history_dict.get(track['id'], []) +# if len(hist) < 2: +# return 0.0 +# (x1, y1), t1 = hist[-2] +# (x2, y2), t2 = hist[-1] +# dist = ((x2-x1)**2 + (y2-y1)**2)**0.5 +# dt = max(t2-t1, 1e-3) +# return dist / dt + +# def check_vehicle_pedestrian_conflict(vehicle_track, pedestrian_tracks, crosswalk_poly, light_state): +# if light_state != 'green': +# return False +# if not point_in_polygon(vehicle_track['center'], crosswalk_poly): +# return False +# for ped in pedestrian_tracks: +# if point_in_polygon(ped['center'], crosswalk_poly): +# return True +# return False + +# def check_stop_on_crosswalk(vehicle_track, crosswalk_poly, light_state, history_dict): +# if light_state != 'red': +# return False +# is_inside = point_in_polygon(vehicle_track['center'], crosswalk_poly) +# speed = calculate_speed(vehicle_track, history_dict) +# return is_inside and speed < 0.5 + +# def check_amber_overspeed(vehicle_track, light_state, amber_start_time, stopline_poly, history_dict, speed_limit_px_per_sec): +# if light_state != 'amber': +# return False +# if not point_in_polygon(vehicle_track['center'], stopline_poly): +# return False +# current_time = time.time() +# speed = calculate_speed(vehicle_track, history_dict) +# if current_time > amber_start_time and speed > speed_limit_px_per_sec: +# return True +# return False +# # --- End Violation Rule Functions --- + +# while self._running and cap.isOpened(): +# # Handle pause state +# self.mutex.lock() +# if self._paused: +# print("[VideoController] Video paused, waiting...") +# self.pause_condition.wait(self.mutex) +# print("[VideoController] Video resumed") +# self.mutex.unlock() + +# # Exit if we're no longer running (could have stopped while paused) +# if not self._running: +# break + +# try: +# ret, frame = cap.read() +# # Add critical frame debugging +# print(f"🟡 Frame read attempt: ret={ret}, frame={None if frame is None else frame.shape}") + +# if not ret or frame is None: +# frame_error_count += 1 +# print(f"⚠️ Frame read error ({frame_error_count}/{max_consecutive_errors})") + +# if frame_error_count >= max_consecutive_errors: +# print("❌ Too many consecutive frame errors, stopping video thread") +# break + +# # Skip this iteration and try again +# time.sleep(0.1) # Wait a bit before trying again +# continue + +# # Reset the error counter if we successfully got a frame +# frame_error_count = 0 + +# # Store the last frame for VLM analysis during pause +# self._last_frame = frame.copy() +# print(f"🟢 Last frame stored for VLM: {frame.shape}") + +# except Exception as e: +# print(f"❌ Critical error reading frame: {e}") +# frame_error_count += 1 +# if frame_error_count >= max_consecutive_errors: +# print("❌ Too many errors, stopping video thread") +# break +# continue + +# # Detection 
and violation processing +# process_start = time.time() + +# # Process detections +# detection_start = time.time() +# detections = [] +# if self.model_manager: +# detections = self.model_manager.detect(frame) + +# # Normalize class names for consistency and check for traffic lights +# traffic_light_indices = [] +# for i, det in enumerate(detections): +# if 'class_name' in det: +# original_name = det['class_name'] +# normalized_name = normalize_class_name(original_name) + +# # Keep track of traffic light indices +# if normalized_name == 'traffic light' or original_name == 'traffic light': +# traffic_light_indices.append(i) + +# if original_name != normalized_name: +# print(f"📊 Normalized class name: '{original_name}' -> '{normalized_name}'") + +# det['class_name'] = normalized_name + +# # Ensure we have at least one traffic light for debugging +# if not traffic_light_indices and self.source_type == 'video': +# print("⚠️ No traffic lights detected, checking for objects that might be traffic lights...") + +# # Try lowering the confidence threshold specifically for traffic lights +# # This is only for debugging purposes +# if self.model_manager and hasattr(self.model_manager, 'detect'): +# try: +# low_conf_detections = self.model_manager.detect(frame, conf_threshold=0.2) +# for det in low_conf_detections: +# if 'class_name' in det and det['class_name'] == 'traffic light': +# if det not in detections: +# print(f"🚦 Found low confidence traffic light: {det['confidence']:.2f}") +# detections.append(det) +# except: +# pass + +# detection_time = (time.time() - detection_start) * 1000 + +# # Violation detection is disabled +# violation_start = time.time() +# violations = [] +# # if self.model_manager and detections: +# # violations = self.model_manager.detect_violations( +# # detections, frame, time.time() +# # ) +# violation_time = (time.time() - violation_start) * 1000 + +# # Update tracking if available +# if self.model_manager: +# detections = self.model_manager.update_tracking(detections, frame) +# # If detections are returned as tuples, convert to dicts for downstream code +# if detections and isinstance(detections[0], tuple): +# # Convert (id, bbox, conf, class_id) to dict +# detections = [ +# {'id': d[0], 'bbox': d[1], 'confidence': d[2], 'class_id': d[3]} +# for d in detections +# ] + +# # Calculate timing metrics +# process_time = (time.time() - process_start) * 1000 +# self.processing_times.append(process_time) + +# # Update FPS +# now = time.time() +# self.frame_count += 1 +# elapsed = now - self.start_time +# if elapsed > 0: +# self.actual_fps = self.frame_count / elapsed + +# fps_smoothed = 1.0 / (now - prev_time) if now > prev_time else 0 +# prev_time = now +# # Update metrics +# self.performance_metrics = { +# 'FPS': f"{fps_smoothed:.1f}", +# 'Detection (ms)': f"{detection_time:.1f}", +# 'Total (ms)': f"{process_time:.1f}" +# } + +# # Store current frame data (thread-safe) +# self.mutex.lock() +# self.current_frame = frame.copy() +# self.current_detections = detections +# self.mutex.unlock() + +# # --- SCENE ANALYTICS PROCESSING --- +# # Process detections through scene analytics if available +# if self.scene_analytics: +# try: +# scene_analytics_data = self.scene_analytics.process_frame(frame, detections) +# # Scene analytics automatically emit signals that we handle above +# except Exception as e: +# print(f"Error in scene analytics processing: {e}") + +# # Process frame with annotations before sending to UI +# annotated_frame = frame.copy() + +# # --- VIOLATION DETECTION LOGIC (Run 
BEFORE drawing boxes) --- +# # First get violation information so we can color boxes appropriately +# violating_vehicle_ids = set() # Track which vehicles are violating +# violations = [] + +# # Initialize traffic light variables +# traffic_lights = [] +# has_traffic_lights = False + +# # Handle multiple traffic lights with consensus approach +# traffic_light_count = 0 +# for det in detections: +# if is_traffic_light(det.get('class_name')): +# has_traffic_lights = True +# traffic_light_count += 1 +# if 'traffic_light_color' in det: +# light_info = det['traffic_light_color'] +# traffic_lights.append({'bbox': det['bbox'], 'color': light_info.get('color', 'unknown'), 'confidence': light_info.get('confidence', 0.0)}) + +# print(f"[TRAFFIC LIGHT] Detected {traffic_light_count} traffic light(s), has_traffic_lights={has_traffic_lights}") +# if has_traffic_lights: +# print(f"[TRAFFIC LIGHT] Traffic light colors: {[tl.get('color', 'unknown') for tl in traffic_lights]}") + +# # Get traffic light position for crosswalk detection +# traffic_light_position = None +# if has_traffic_lights: +# for det in detections: +# if is_traffic_light(det.get('class_name')) and 'bbox' in det: +# traffic_light_bbox = det['bbox'] +# # Extract center point from bbox for crosswalk utils +# x1, y1, x2, y2 = traffic_light_bbox +# traffic_light_position = ((x1 + x2) // 2, (y1 + y2) // 2) +# break + +# # Run crosswalk detection ONLY if traffic light is detected +# crosswalk_bbox, violation_line_y, debug_info = None, None, {} +# if has_traffic_lights and traffic_light_position is not None: +# try: +# print(f"[CROSSWALK] Traffic light detected at {traffic_light_position}, running crosswalk detection") +# # Use new crosswalk_utils2 logic only when traffic light exists +# annotated_frame, crosswalk_bbox, violation_line_y, debug_info = detect_crosswalk_and_violation_line( +# annotated_frame, +# traffic_light_position=traffic_light_position +# ) +# print(f"[CROSSWALK] Detection result: crosswalk_bbox={crosswalk_bbox is not None}, violation_line_y={violation_line_y}") +# # --- Draw crosswalk region if detected and close to traffic light --- +# # (REMOVED: Do not draw crosswalk box or label) +# # if crosswalk_bbox is not None: +# # x, y, w, h = map(int, crosswalk_bbox) +# # tl_x, tl_y = traffic_light_position +# # crosswalk_center_y = y + h // 2 +# # distance = abs(crosswalk_center_y - tl_y) +# # print(f"[CROSSWALK DEBUG] Crosswalk bbox: {crosswalk_bbox}, Traffic light: {traffic_light_position}, vertical distance: {distance}") +# # if distance < 120: +# # cv2.rectangle(annotated_frame, (x, y), (x + w, y + h), (0, 255, 0), 3) +# # cv2.putText(annotated_frame, "Crosswalk", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2) +# # # Top and bottom edge of crosswalk +# # top_edge = y +# # bottom_edge = y + h +# # if abs(tl_y - top_edge) < abs(tl_y - bottom_edge): +# # crosswalk_edge_y = top_edge +# # else: +# # crosswalk_edge_y = bottom_edge +# if crosswalk_bbox is not None: +# x, y, w, h = map(int, crosswalk_bbox) +# tl_x, tl_y = traffic_light_position +# crosswalk_center_y = y + h // 2 +# distance = abs(crosswalk_center_y - tl_y) +# print(f"[CROSSWALK DEBUG] Crosswalk bbox: {crosswalk_bbox}, Traffic light: {traffic_light_position}, vertical distance: {distance}") +# # Top and bottom edge of crosswalk +# top_edge = y +# bottom_edge = y + h +# if abs(tl_y - top_edge) < abs(tl_y - bottom_edge): +# crosswalk_edge_y = top_edge +# else: +# crosswalk_edge_y = bottom_edge +# except Exception as e: +# print(f"[ERROR] Crosswalk detection 
failed: {e}") +# crosswalk_bbox, violation_line_y, debug_info = None, None, {} +# else: +# print(f"[CROSSWALK] No traffic light detected (has_traffic_lights={has_traffic_lights}), skipping crosswalk detection") +# # NO crosswalk detection without traffic light +# violation_line_y = None + +# # Check if crosswalk is detected +# crosswalk_detected = crosswalk_bbox is not None +# stop_line_detected = debug_info.get('stop_line') is not None + +# # ALWAYS process vehicle tracking (moved outside violation logic) +# tracked_vehicles = [] +# if hasattr(self, 'vehicle_tracker') and self.vehicle_tracker is not None: +# try: +# # Filter vehicle detections +# vehicle_classes = ['car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle'] +# vehicle_dets = [] +# h, w = frame.shape[:2] + +# print(f"[TRACK DEBUG] Processing {len(detections)} total detections") + +# for det in detections: +# if (det.get('class_name') in vehicle_classes and +# 'bbox' in det and +# det.get('confidence', 0) > self.min_confidence_threshold): + +# # Check bbox dimensions +# bbox = det['bbox'] +# x1, y1, x2, y2 = bbox +# box_w, box_h = x2-x1, y2-y1 +# box_area = box_w * box_h +# area_ratio = box_area / (w * h) + +# print(f"[TRACK DEBUG] Vehicle {det.get('class_name')} conf={det.get('confidence'):.2f}, area_ratio={area_ratio:.4f}") + +# if 0.001 <= area_ratio <= 0.25: +# vehicle_dets.append(det) +# print(f"[TRACK DEBUG] Added vehicle: {det.get('class_name')} conf={det.get('confidence'):.2f}") +# else: +# print(f"[TRACK DEBUG] Rejected vehicle: area_ratio={area_ratio:.4f} not in range [0.001, 0.25]") + +# print(f"[TRACK DEBUG] Filtered to {len(vehicle_dets)} vehicle detections") + +# # Update tracker +# if len(vehicle_dets) > 0: +# print(f"[TRACK DEBUG] Updating tracker with {len(vehicle_dets)} vehicles...") +# tracks = self.vehicle_tracker.update(vehicle_dets, frame) +# # Filter out tracks without bbox to avoid warnings +# valid_tracks = [] +# for track in tracks: +# bbox = None +# if isinstance(track, dict): +# bbox = track.get('bbox', None) +# else: +# bbox = getattr(track, 'bbox', None) +# if bbox is not None: +# valid_tracks.append(track) +# else: +# print(f"Warning: Track has no bbox, skipping: {track}") +# tracks = valid_tracks +# print(f"[TRACK DEBUG] Tracker returned {len(tracks)} tracks (after bbox filter)") +# else: +# print(f"[TRACK DEBUG] No vehicles to track, skipping tracker update") +# tracks = [] + +# # Process each tracked vehicle +# tracked_vehicles = [] +# track_ids_seen = [] + +# for track in tracks: +# track_id = track['id'] +# bbox = track['bbox'] +# x1, y1, x2, y2 = map(float, bbox) +# center_y = (y1 + y2) / 2 + +# # Check for duplicate IDs +# if track_id in track_ids_seen: +# print(f"[TRACK ERROR] Duplicate ID detected: {track_id}") +# track_ids_seen.append(track_id) + +# print(f"[TRACK DEBUG] Processing track ID={track_id} bbox={bbox}") + +# # Initialize or update vehicle history +# if track_id not in self.vehicle_history: +# from collections import deque +# self.vehicle_history[track_id] = deque(maxlen=self.position_history_size) + +# # Initialize vehicle status if not exists +# if track_id not in self.vehicle_statuses: +# self.vehicle_statuses[track_id] = { +# 'recent_movement': [], +# 'violation_history': [], +# 'crossed_during_red': False, +# 'last_position': None, # Track last position for jump detection +# 'suspicious_jumps': 0 # Count suspicious position jumps +# } + +# # Detect suspicious position jumps (potential ID switches) +# if self.vehicle_statuses[track_id]['last_position'] is not None: +# 
last_y = self.vehicle_statuses[track_id]['last_position'] +# center_y = (y1 + y2) / 2 +# position_jump = abs(center_y - last_y) + +# if position_jump > self.max_position_jump: +# self.vehicle_statuses[track_id]['suspicious_jumps'] += 1 +# print(f"[TRACK WARNING] Vehicle ID={track_id} suspicious position jump: {last_y:.1f} -> {center_y:.1f} (jump={position_jump:.1f})") + +# # If too many suspicious jumps, reset violation status to be safe +# if self.vehicle_statuses[track_id]['suspicious_jumps'] > 2: +# print(f"[TRACK RESET] Vehicle ID={track_id} has too many suspicious jumps, resetting violation status") +# self.vehicle_statuses[track_id]['crossed_during_red'] = False +# self.vehicle_statuses[track_id]['suspicious_jumps'] = 0 + +# # Update position history and last position +# self.vehicle_history[track_id].append(center_y) +# self.vehicle_statuses[track_id]['last_position'] = center_y + +# # BALANCED movement detection - detect clear movement while avoiding false positives +# is_moving = False +# movement_detected = False + +# if len(self.vehicle_history[track_id]) >= 3: # Require at least 3 frames for movement detection +# recent_positions = list(self.vehicle_history[track_id]) + +# # Check movement over 3 frames for quick response +# if len(recent_positions) >= 3: +# movement_3frames = abs(recent_positions[-1] - recent_positions[-3]) +# if movement_3frames > self.movement_threshold: # More responsive threshold +# movement_detected = True +# print(f"[MOVEMENT] Vehicle ID={track_id} MOVING: 3-frame movement = {movement_3frames:.1f}") + +# # Confirm with longer movement for stability (if available) +# if len(recent_positions) >= 5: +# movement_5frames = abs(recent_positions[-1] - recent_positions[-5]) +# if movement_5frames > self.movement_threshold * 1.5: # Moderate threshold for 5 frames +# movement_detected = True +# print(f"[MOVEMENT] Vehicle ID={track_id} MOVING: 5-frame movement = {movement_5frames:.1f}") + +# # Store historical movement for smoothing - require consistent movement +# self.vehicle_statuses[track_id]['recent_movement'].append(movement_detected) +# if len(self.vehicle_statuses[track_id]['recent_movement']) > 4: # Shorter history for quicker response +# self.vehicle_statuses[track_id]['recent_movement'].pop(0) + +# # BALANCED: Require majority of recent frames to show movement (2 out of 4) +# recent_movement_count = sum(self.vehicle_statuses[track_id]['recent_movement']) +# total_recent_frames = len(self.vehicle_statuses[track_id]['recent_movement']) +# if total_recent_frames >= 2 and recent_movement_count >= (total_recent_frames * 0.5): # 50% of frames must show movement +# is_moving = True + +# print(f"[TRACK DEBUG] Vehicle ID={track_id} is_moving={is_moving} (threshold={self.movement_threshold})") + +# # Initialize as not violating +# is_violation = False + +# tracked_vehicles.append({ +# 'id': track_id, +# 'bbox': bbox, +# 'center_y': center_y, +# 'is_moving': is_moving, +# 'is_violation': is_violation +# }) + +# print(f"[DEBUG] ByteTrack tracked {len(tracked_vehicles)} vehicles") +# for i, tracked in enumerate(tracked_vehicles): +# print(f" Vehicle {i}: ID={tracked['id']}, center_y={tracked['center_y']:.1f}, moving={tracked['is_moving']}, violating={tracked['is_violation']}") + +# # DEBUG: Print all tracked vehicle IDs and their bboxes for this frame +# if tracked_vehicles: +# print(f"[DEBUG] All tracked vehicles this frame:") +# for v in tracked_vehicles: +# print(f" ID={v['id']} bbox={v['bbox']} center_y={v.get('center_y', 'NA')}") +# else: +# print("[DEBUG] No 
tracked vehicles this frame!") + +# # Clean up old vehicle data +# current_track_ids = [tracked['id'] for tracked in tracked_vehicles] +# self._cleanup_old_vehicle_data(current_track_ids) + +# except Exception as e: +# print(f"[ERROR] Vehicle tracking failed: {e}") +# import traceback +# traceback.print_exc() +# else: +# print("[WARN] ByteTrack vehicle tracker not available!") + +# # Process violations - CHECK VEHICLES THAT CROSS THE LINE OVER A WINDOW OF FRAMES +# # IMPORTANT: Only process violations if BOTH traffic light is detected AND crosswalk is detected AND red light AND violation line exists + +# # Handle case when no traffic light is detected in video +# if not has_traffic_lights: +# print("[INFO] No traffic light detected in video - violation detection disabled") +# # Emit status to UI (only once per session to avoid spam) +# if not hasattr(self, '_no_traffic_light_notified'): +# self.stats_ready.emit({ +# 'status': 'monitoring_only', +# 'message': 'No traffic light detected - monitoring vehicles only', +# 'violation_detection_active': False, +# 'timestamp': time.time() +# }) +# self._no_traffic_light_notified = True +# else: +# # Check if traffic light is red (only when traffic light exists) +# is_red_light = self.latest_traffic_light and self.latest_traffic_light.get('color') == 'red' + +# # New condition: ALL of these must be true for violation line processing: +# # 1. Traffic lights detected (has_traffic_lights) +# # 2. Crosswalk detected (crosswalk_detected) +# # 3. Red light is currently active (is_red_light) +# # 4. Violation line exists (violation_line_y is not None) +# # 5. Vehicles are being tracked (tracked_vehicles) +# if (has_traffic_lights and crosswalk_detected and is_red_light and +# violation_line_y is not None and tracked_vehicles): +# print(f"[VIOLATION DEBUG] ALL CONDITIONS MET - Traffic light: {has_traffic_lights}, Crosswalk: {crosswalk_detected}, Red light: {is_red_light}, Line Y: {violation_line_y}, Vehicles: {len(tracked_vehicles)}") + +# # Check each tracked vehicle for violations +# for tracked in tracked_vehicles: +# track_id = tracked['id'] +# center_y = tracked['center_y'] +# is_moving = tracked['is_moving'] + +# # Get position history for this vehicle +# position_history = list(self.vehicle_history[track_id]) + +# # Enhanced crossing detection: check over a window of frames +# line_crossed_in_window = False +# crossing_details = None +# if len(position_history) >= 2: +# window_size = min(self.crossing_check_window, len(position_history)) +# for i in range(1, window_size): +# prev_y = position_history[-(i+1)] # Earlier position +# curr_y = position_history[-i] # Later position +# # Check if vehicle crossed the line in this frame pair +# if prev_y < violation_line_y and curr_y >= violation_line_y: +# line_crossed_in_window = True +# crossing_details = { +# 'frames_ago': i, +# 'prev_y': prev_y, +# 'curr_y': curr_y, +# 'window_checked': window_size +# } +# print(f"[VIOLATION DEBUG] Vehicle ID={track_id} crossed line {i} frames ago: {prev_y:.1f} -> {curr_y:.1f}") +# break + +# print(f"[VIOLATION DEBUG] Vehicle ID={track_id}: latest_traffic_light={self.latest_traffic_light}") +# print(f"[VIOLATION DEBUG] Vehicle ID={track_id}: position_history={[f'{p:.1f}' for p in position_history[-5:]]}"); # Show last 5 positions +# print(f"[VIOLATION DEBUG] Vehicle ID={track_id}: line_crossed_in_window={line_crossed_in_window}, crossing_details={crossing_details}") + +# # Enhanced violation detection: vehicle crossed the line while moving (red light already verified 
above) +# actively_crossing = (line_crossed_in_window and is_moving) + +# # Initialize violation status for new vehicles +# if 'crossed_during_red' not in self.vehicle_statuses[track_id]: +# self.vehicle_statuses[track_id]['crossed_during_red'] = False + +# # Mark vehicle as having crossed during red if it actively crosses +# if actively_crossing: +# # Additional validation: ensure it's not a false positive from ID switch +# suspicious_jumps = self.vehicle_statuses[track_id].get('suspicious_jumps', 0) +# if suspicious_jumps <= 1: # Allow crossing if not too many suspicious jumps +# self.vehicle_statuses[track_id]['crossed_during_red'] = True +# print(f"[VIOLATION ALERT] Vehicle ID={track_id} CROSSED line during red light!") +# print(f" -> Crossing details: {crossing_details}") +# else: +# print(f"[VIOLATION IGNORED] Vehicle ID={track_id} crossing ignored due to {suspicious_jumps} suspicious jumps") + +# # IMPORTANT: Reset violation status when light turns green (regardless of position) +# if not is_red_light: +# if self.vehicle_statuses[track_id]['crossed_during_red']: +# print(f"[VIOLATION RESET] Vehicle ID={track_id} violation status reset (light turned green)") +# self.vehicle_statuses[track_id]['crossed_during_red'] = False + +# # Vehicle is violating ONLY if it crossed during red and light is still red +# is_violation = (self.vehicle_statuses[track_id]['crossed_during_red'] and is_red_light) + +# # Track current violation state for analytics - only actual crossings +# self.vehicle_statuses[track_id]['violation_history'].append(actively_crossing) +# if len(self.vehicle_statuses[track_id]['violation_history']) > 5: +# self.vehicle_statuses[track_id]['violation_history'].pop(0) + +# print(f"[VIOLATION DEBUG] Vehicle ID={track_id}: center_y={center_y:.1f}, line={violation_line_y}") +# print(f" history_window={[f'{p:.1f}' for p in position_history[-self.crossing_check_window:]]}") +# print(f" moving={is_moving}, red_light={is_red_light}") +# print(f" actively_crossing={actively_crossing}, crossed_during_red={self.vehicle_statuses[track_id]['crossed_during_red']}") +# print(f" suspicious_jumps={self.vehicle_statuses[track_id].get('suspicious_jumps', 0)}") +# print(f" FINAL_VIOLATION={is_violation}") + +# # Update violation status +# tracked['is_violation'] = is_violation + +# if actively_crossing and self.vehicle_statuses[track_id].get('suspicious_jumps', 0) <= 1: # Only add if not too many suspicious jumps +# # Add to violating vehicles set +# violating_vehicle_ids.add(track_id) + +# # Add to violations list +# timestamp = datetime.now() # Keep as datetime object, not string +# violations.append({ +# 'track_id': track_id, +# 'id': track_id, +# 'bbox': [int(tracked['bbox'][0]), int(tracked['bbox'][1]), int(tracked['bbox'][2]), int(tracked['bbox'][3])], +# 'violation': 'line_crossing', +# 'violation_type': 'line_crossing', # Add this for analytics compatibility +# 'timestamp': timestamp, +# 'line_position': violation_line_y, +# 'movement': crossing_details if crossing_details else {'prev_y': center_y, 'current_y': center_y}, +# 'crossing_window': self.crossing_check_window, +# 'position_history': list(position_history[-10:]) # Include recent history for debugging +# }) + +# print(f"[DEBUG] 🚨 VIOLATION DETECTED: Vehicle ID={track_id} CROSSED VIOLATION LINE") +# print(f" Enhanced detection: {crossing_details}") +# print(f" Position history: {[f'{p:.1f}' for p in position_history[-10:]]}") +# print(f" Detection window: {self.crossing_check_window} frames") +# print(f" while RED LIGHT & MOVING") 
+ +# else: +# # Log why violation detection was skipped +# reasons = [] +# if not crosswalk_detected: +# reasons.append("No crosswalk detected") +# if not is_red_light: +# reasons.append(f"Light not red (current: {self.latest_traffic_light.get('color') if self.latest_traffic_light else 'None'})") +# if violation_line_y is None: +# reasons.append("No violation line") +# if not tracked_vehicles: +# reasons.append("No vehicles tracked") + +# if reasons: +# print(f"[INFO] Violation detection skipped: {', '.join(reasons)}") + +# # --- ENHANCED VIOLATION DETECTION: Add new real-world scenarios --- +# # 1. Pedestrian right-of-way violation (blocking crosswalk during green) +# # 2. Improper stopping over crosswalk at red +# # 3. Accelerating through yellow/amber light +# pedestrian_dets = [det for det in detections if det.get('class_name') == 'person' and 'bbox' in det] +# pedestrian_tracks = [] +# for ped in pedestrian_dets: +# x1, y1, x2, y2 = ped['bbox'] +# center = ((x1 + x2) // 2, (y1 + y2) // 2) +# pedestrian_tracks.append({'bbox': ped['bbox'], 'center': center}) + +# # Prepare crosswalk polygon for point-in-polygon checks +# crosswalk_poly = None +# if crosswalk_bbox is not None: +# x, y, w, h = crosswalk_bbox +# crosswalk_poly = (x, y, w, h) +# stopline_poly = crosswalk_poly # For simplicity, use crosswalk as stopline + +# # Track amber/yellow light start time +# amber_start_time = getattr(self, 'amber_start_time', None) +# latest_light_color = self.latest_traffic_light.get('color') if isinstance(self.latest_traffic_light, dict) else self.latest_traffic_light +# if latest_light_color == 'yellow' and amber_start_time is None: +# amber_start_time = time.time() +# self.amber_start_time = amber_start_time +# elif latest_light_color != 'yellow': +# self.amber_start_time = None + +# # Vehicle position history for speed calculation +# vehicle_position_history = {} +# for track in tracked_vehicles: +# track_id = track['id'] +# bbox = track['bbox'] +# x1, y1, x2, y2 = bbox +# center = ((x1 + x2) // 2, (y1 + y2) // 2) +# # Store (center, timestamp) +# if track_id not in vehicle_position_history: +# vehicle_position_history[track_id] = [] +# vehicle_position_history[track_id].append((center, time.time())) +# track['center'] = center + +# # --- 1. Pedestrian right-of-way violation --- +# if crosswalk_poly and latest_light_color == 'green' and pedestrian_tracks: +# for track in tracked_vehicles: +# # Only consider moving vehicles - stopped vehicles aren't likely to be violating +# if track.get('is_moving', False) and point_in_polygon(track['center'], crosswalk_poly): +# # Check for pedestrians in the crosswalk +# for ped in pedestrian_tracks: +# if point_in_polygon(ped['center'], crosswalk_poly): +# # Vehicle is blocking crosswalk during green with pedestrian present +# violations.append({ +# 'track_id': track['id'], +# 'id': track['id'], +# 'bbox': [int(track['bbox'][0]), int(track['bbox'][1]), int(track['bbox'][2]), int(track['bbox'][3])], +# 'violation': 'pedestrian_right_of_way', +# 'violation_type': 'pedestrian_right_of_way', +# 'timestamp': datetime.now(), +# 'details': { +# 'pedestrian_bbox': ped['bbox'], +# 'crosswalk_bbox': crosswalk_bbox, +# 'is_moving': track.get('is_moving', False), +# 'traffic_light': latest_light_color +# } +# }) +# print(f"[VIOLATION] Pedestrian right-of-way violation: Vehicle ID={track['id']} blocking crosswalk during green light with pedestrian present") + +# # --- 2. 
Improper stopping over crosswalk at red --- +# if crosswalk_poly and latest_light_color == 'red': +# for track in tracked_vehicles: +# # Check if vehicle is not moving (to confirm it's stopped) +# is_stopped = not track.get('is_moving', True) + +# if is_stopped and point_in_polygon(track['center'], crosswalk_poly): +# # Calculate overlap ratio between vehicle and crosswalk +# vx1, vy1, vx2, vy2 = track['bbox'] +# cx, cy, cw, ch = crosswalk_poly +# overlap_x1 = max(vx1, cx) +# overlap_y1 = max(vy1, cy) +# overlap_x2 = min(vx2, cx + cw) +# overlap_y2 = min(vy2, cy + ch) +# overlap_area = max(0, overlap_x2 - overlap_x1) * max(0, overlap_y2 - overlap_y1) +# vehicle_area = (vx2 - vx1) * (vy2 - vy1) +# overlap_ratio = overlap_area / max(vehicle_area, 1) + +# # Double-verify that vehicle is stopped by checking explicit speed +# speed = 0.0 +# hist = vehicle_position_history.get(track['id'], []) +# if len(hist) >= 2: +# (c1, t1), (c2, t2) = hist[-2], hist[-1] +# dist = ((c2[0]-c1[0])**2 + (c2[1]-c1[1])**2)**0.5 +# dt = max(t2-t1, 1e-3) +# speed = dist / dt + +# # Vehicle must have significant overlap with crosswalk (>25%) and be stopped +# if overlap_ratio > 0.25 and speed < 0.8: +# violations.append({ +# 'track_id': track['id'], +# 'id': track['id'], +# 'bbox': [int(track['bbox'][0]), int(track['bbox'][1]), int(track['bbox'][2]), int(track['bbox'][3])], +# 'violation': 'stop_on_crosswalk', +# 'violation_type': 'stop_on_crosswalk', +# 'timestamp': datetime.now(), +# 'details': { +# 'overlap_ratio': overlap_ratio, +# 'speed': speed, +# 'crosswalk_bbox': crosswalk_bbox, +# 'traffic_light': latest_light_color, +# 'is_moving_flag': track.get('is_moving', None) +# } +# }) +# print(f"[VIOLATION] Improper stop on crosswalk: Vehicle ID={track['id']} stopped on crosswalk during red light (overlap={overlap_ratio:.2f}, speed={speed:.2f})") + +# # --- 3. Accelerating through yellow/amber light --- +# if stopline_poly and latest_light_color == 'yellow' and amber_start_time: +# # Calculate time since light turned yellow +# current_time = time.time() +# time_since_yellow = current_time - amber_start_time + +# # Speed threshold (in pixels per second) - can be adjusted based on testing +# speed_limit_px_per_sec = 8.0 + +# # Check each vehicle approaching the intersection +# for track in tracked_vehicles: +# # Check if vehicle is near the stop line/intersection +# if point_in_polygon(track['center'], stopline_poly) or ( +# track['center'][1] < stopline_poly[1] + stopline_poly[3] + 50 and +# track['center'][1] > stopline_poly[1] - 50 +# ): +# # If the vehicle is moving (confirmed via tracker) +# if track.get('is_moving', False): +# # Calculate acceleration by looking at recent speed changes +# hist = vehicle_position_history.get(track['id'], []) +# if len(hist) >= 3: +# # Calculate speeds at different time points +# (c1, t1), (c2, t2), (c3, t3) = hist[-3], hist[-2], hist[-1] + +# # Speed at earlier point +# v1 = ((c2[0]-c1[0])**2 + (c2[1]-c1[1])**2)**0.5 / max(t2-t1, 1e-3) + +# # Speed at later point +# v2 = ((c3[0]-c2[0])**2 + (c3[1]-c2[1])**2)**0.5 / max(t3-t2, 1e-3) + +# # Acceleration violation if: +# # 1. Speed increases significantly (>20%) +# # 2. Final speed exceeds threshold +# # 3. 
Yellow light is less than 3 seconds old (typical acceleration window) +# if v2 > v1 * 1.2 and v2 > speed_limit_px_per_sec and time_since_yellow < 3.0: +# violations.append({ +# 'track_id': track['id'], +# 'id': track['id'], +# 'bbox': [int(track['bbox'][0]), int(track['bbox'][1]), int(track['bbox'][2]), int(track['bbox'][3])], +# 'violation': 'amber_acceleration', +# 'violation_type': 'amber_acceleration', +# 'timestamp': datetime.now(), +# 'details': { +# 'speed_before': v1, +# 'speed_after': v2, +# 'acceleration': (v2-v1)/max(t3-t2, 1e-3), +# 'time_since_yellow': time_since_yellow, +# 'traffic_light': latest_light_color +# } +# }) +# print(f"[VIOLATION] Amber light acceleration: Vehicle ID={track['id']} accelerated from {v1:.2f} to {v2:.2f} px/sec {time_since_yellow:.1f}s after yellow light") + +# # Emit progress signal after processing each frame +# if hasattr(self, 'progress_ready'): +# self.progress_ready.emit(int(cap.get(cv2.CAP_PROP_POS_FRAMES)), int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), time.time()) + +# # Draw detections with bounding boxes - NOW with violation info +# # Only show traffic light and vehicle classes +# allowed_classes = ['traffic light', 'car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle'] +# filtered_detections = [det for det in detections if det.get('class_name') in allowed_classes] +# print(f"Drawing {len(filtered_detections)} detection boxes on frame (filtered)") +# # Statistics for debugging +# vehicles_with_ids = 0 +# vehicles_without_ids = 0 +# vehicles_moving = 0 +# vehicles_violating = 0 + +# if detections and len(detections) > 0: +# # Only show traffic light and vehicle classes +# allowed_classes = ['traffic light', 'car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle'] +# filtered_detections = [det for det in detections if det.get('class_name') in allowed_classes] +# print(f"Drawing {len(filtered_detections)} detection boxes on frame (filtered)") +# # Statistics for debugging +# vehicles_with_ids = 0 +# vehicles_without_ids = 0 +# vehicles_moving = 0 +# vehicles_violating = 0 +# for det in filtered_detections: +# if 'bbox' in det: +# bbox = det['bbox'] +# x1, y1, x2, y2 = map(int, bbox) +# label = det.get('class_name', 'object') +# confidence = det.get('confidence', 0.0) + +# # Robustness: ensure label and confidence are not None +# if label is None: +# label = 'object' +# if confidence is None: +# confidence = 0.0 +# class_id = det.get('class_id', -1) + +# # Check if this detection corresponds to a violating or moving vehicle +# det_center_x = (x1 + x2) / 2 +# det_center_y = (y1 + y2) / 2 +# is_violating_vehicle = False +# is_moving_vehicle = False +# vehicle_id = None + +# # Match detection with tracked vehicles - IMPROVED MATCHING +# if label in ['car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle'] and len(tracked_vehicles) > 0: +# print(f"[MATCH DEBUG] Attempting to match {label} detection at ({det_center_x:.1f}, {det_center_y:.1f}) with {len(tracked_vehicles)} tracked vehicles") +# best_match = None +# best_distance = float('inf') +# best_iou = 0.0 + +# for i, tracked in enumerate(tracked_vehicles): +# track_bbox = tracked['bbox'] +# track_x1, track_y1, track_x2, track_y2 = map(float, track_bbox) + +# # Calculate center distance +# track_center_x = (track_x1 + track_x2) / 2 +# track_center_y = (track_y1 + track_y2) / 2 +# center_distance = ((det_center_x - track_center_x)**2 + (det_center_y - track_center_y)**2)**0.5 + +# # Calculate IoU (Intersection over Union) +# intersection_x1 = max(x1, track_x1) +# intersection_y1 = max(y1, track_y1) +# 
intersection_x2 = min(x2, track_x2) +# intersection_y2 = min(y2, track_y2) + +# if intersection_x2 > intersection_x1 and intersection_y2 > intersection_y1: +# intersection_area = (intersection_x2 - intersection_x1) * (intersection_y2 - intersection_y1) +# det_area = (x2 - x1) * (y2 - y1) +# track_area = (track_x2 - track_x1) * (track_y2 - track_y1) +# union_area = det_area + track_area - intersection_area +# iou = intersection_area / union_area if union_area > 0 else 0 +# else: +# iou = 0 + +# print(f"[MATCH DEBUG] Track {i}: ID={tracked['id']}, center=({track_center_x:.1f}, {track_center_y:.1f}), distance={center_distance:.1f}, IoU={iou:.3f}") + +# # Use stricter matching criteria - prioritize IoU over distance +# # Good match if: high IoU OR close center distance with some overlap +# is_good_match = (iou > 0.3) or (center_distance < 60 and iou > 0.1) + +# if is_good_match: +# print(f"[MATCH DEBUG] Track {i} is a good match (IoU={iou:.3f}, distance={center_distance:.1f})") +# # Prefer higher IoU, then lower distance +# match_score = iou + (100 - min(center_distance, 100)) / 100 # Composite score +# if iou > best_iou or (iou == best_iou and center_distance < best_distance): +# best_distance = center_distance +# best_iou = iou +# best_match = tracked +# else: +# print(f"[MATCH DEBUG] Track {i} failed matching criteria (IoU={iou:.3f}, distance={center_distance:.1f})") + +# if best_match: +# vehicle_id = best_match['id'] +# is_moving_vehicle = best_match.get('is_moving', False) +# is_violating_vehicle = best_match.get('is_violation', False) +# print(f"[MATCH SUCCESS] Detection at ({det_center_x:.1f},{det_center_y:.1f}) matched with track ID={vehicle_id}") +# print(f" -> STATUS: moving={is_moving_vehicle}, violating={is_violating_vehicle}, IoU={best_iou:.3f}, distance={best_distance:.1f}") +# else: +# print(f"[MATCH FAILED] No suitable match found for {label} detection at ({det_center_x:.1f}, {det_center_y:.1f})") +# print(f" -> Will draw as untracked detection with default color") +# else: +# if label not in ['car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle']: +# print(f"[MATCH DEBUG] Skipping matching for non-vehicle label: {label}") +# elif len(tracked_vehicles) == 0: +# print(f"[MATCH DEBUG] No tracked vehicles available for matching") +# else: +# try: +# if len(tracked_vehicles) > 0: +# distances = [((det_center_x - (t['bbox'][0] + t['bbox'][2])/2)**2 + (det_center_y - (t['bbox'][1] + t['bbox'][3])/2)**2)**0.5 for t in tracked_vehicles[:3]] +# print(f"[DEBUG] No match found for detection at ({det_center_x:.1f},{det_center_y:.1f}) - distances: {distances}") +# else: +# print(f"[DEBUG] No tracked vehicles available to match detection at ({det_center_x:.1f},{det_center_y:.1f})") +# except NameError: +# print(f"[DEBUG] No match found for detection (coords unavailable)") +# if len(tracked_vehicles) > 0: +# print(f"[DEBUG] Had {len(tracked_vehicles)} tracked vehicles available") + +# # Choose box color based on vehicle status +# # PRIORITY: 1. Violating (RED) - crossed during red light 2. Moving (ORANGE) 3. 
Stopped (GREEN) +# if is_violating_vehicle and vehicle_id is not None: +# box_color = (0, 0, 255) # RED for violating vehicles (crossed line during red) +# label_text = f"{label}:ID{vehicle_id}⚠️" +# thickness = 4 +# vehicles_violating += 1 +# print(f"[COLOR DEBUG] Drawing RED box for VIOLATING vehicle ID={vehicle_id} (crossed during red)") +# elif is_moving_vehicle and vehicle_id is not None and not is_violating_vehicle: +# box_color = (0, 165, 255) # ORANGE for moving vehicles (not violating) +# label_text = f"{label}:ID{vehicle_id}" +# thickness = 3 +# vehicles_moving += 1 +# print(f"[COLOR DEBUG] Drawing ORANGE box for MOVING vehicle ID={vehicle_id} (not violating)") +# elif label in ['car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle'] and vehicle_id is not None: +# box_color = (0, 255, 0) # Green for stopped vehicles +# label_text = f"{label}:ID{vehicle_id}" +# thickness = 2 +# print(f"[COLOR DEBUG] Drawing GREEN box for STOPPED vehicle ID={vehicle_id}") +# elif is_traffic_light(label): +# box_color = (0, 0, 255) # Red for traffic lights +# label_text = f"{label}" +# thickness = 2 +# else: +# box_color = (0, 255, 0) # Default green for other objects +# label_text = f"{label}" +# thickness = 2 + +# # Update statistics +# if label in ['car', 'truck', 'bus', 'motorcycle', 'van', 'bicycle']: +# if vehicle_id is not None: +# vehicles_with_ids += 1 +# else: +# vehicles_without_ids += 1 + +# # Draw rectangle and label +# cv2.rectangle(annotated_frame, (x1, y1), (x2, y2), box_color, thickness) +# cv2.putText(annotated_frame, label_text, (x1, y1-10), +# cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 2) +# # id_text = f"ID: {det['id']}" +# # # Calculate text size for background +# # (tw, th), baseline = cv2.getTextSize(id_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2) +# # # Draw filled rectangle for background (top-left of bbox) +# # cv2.rectangle(annotated_frame, (x1, y1 - th - 8), (x1 + tw + 4, y1), (0, 0, 0), -1) +# # # Draw the ID text in bold yellow +# # cv2.putText(annotated_frame, id_text, (x1 + 2, y1 - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2, cv2.LINE_AA) +# # print(f"[DEBUG] Detection ID: {det['id']} BBOX: {bbox} CLASS: {label} CONF: {confidence:.2f}") + +# if class_id == 9 or is_traffic_light(label): +# try: +# light_info = detect_traffic_light_color(annotated_frame, [x1, y1, x2, y2]) +# if light_info.get("color", "unknown") == "unknown": +# light_info = ensure_traffic_light_color(annotated_frame, [x1, y1, x2, y2]) +# det['traffic_light_color'] = light_info +# # Draw enhanced traffic light status +# annotated_frame = draw_traffic_light_status(annotated_frame, bbox, light_info) + +# # --- Update latest_traffic_light for UI/console --- +# self.latest_traffic_light = light_info + +# # Add a prominent traffic light status at the top of the frame +# color = light_info.get('color', 'unknown') +# confidence = light_info.get('confidence', 0.0) + +# if color == 'red': +# status_color = (0, 0, 255) # Red +# status_text = f"Traffic Light: RED ({confidence:.2f})" + +# # Draw a prominent red banner across the top +# banner_height = 40 +# cv2.rectangle(annotated_frame, (0, 0), (annotated_frame.shape[1], banner_height), (0, 0, 150), -1) + +# # Add text +# font = cv2.FONT_HERSHEY_DUPLEX +# font_scale = 0.9 +# font_thickness = 2 +# cv2.putText(annotated_frame, status_text, (10, banner_height-12), font, +# font_scale, (255, 255, 255), font_thickness) +# except Exception as e: +# print(f"[WARN] Could not detect/draw traffic light color: {e}") + +# # Print statistics summary +# print(f"[STATS] 
Vehicles: {vehicles_with_ids} with IDs, {vehicles_without_ids} without IDs") + +# # Handle multiple traffic lights with consensus approach +# for det in detections: +# if is_traffic_light(det.get('class_name')): +# has_traffic_lights = True +# if 'traffic_light_color' in det: +# light_info = det['traffic_light_color'] +# traffic_lights.append({'bbox': det['bbox'], 'color': light_info.get('color', 'unknown'), 'confidence': light_info.get('confidence', 0.0)}) + +# # Determine the dominant traffic light color based on confidence +# if traffic_lights: +# # Filter to just red lights and sort by confidence +# red_lights = [tl for tl in traffic_lights if tl.get('color') == 'red'] +# if red_lights: +# # Use the highest confidence red light for display +# highest_conf_red = max(red_lights, key=lambda x: x.get('confidence', 0)) +# # Update the global traffic light status for consistent UI display +# self.latest_traffic_light = { +# 'color': 'red', +# 'confidence': highest_conf_red.get('confidence', 0.0) +# } + +# # Emit all violations as a batch for UI (optional) +# if violations: +# if hasattr(self, 'violations_batch_ready'): +# self.violations_batch_ready.emit(violations) +# # Emit individual violation signals for each violation +# for violation in violations: +# print(f"🚨 Emitting RED LIGHT VIOLATION: Track ID {violation['track_id']}") +# violation['frame'] = frame +# violation['violation_line_y'] = violation_line_y +# self.violation_detected.emit(violation) +# print(f"[DEBUG] Emitted {len(violations)} violation signals") + +# # Add FPS display directly on frame +# # cv2.putText(annotated_frame, f"FPS: {fps_smoothed:.1f}", (10, 30), +# # cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2) + +# # # --- Always draw detected traffic light color indicator at top --- +# # color = self.latest_traffic_light.get('color', 'unknown') if isinstance(self.latest_traffic_light, dict) else str(self.latest_traffic_light) +# # confidence = self.latest_traffic_light.get('confidence', 0.0) if isinstance(self.latest_traffic_light, dict) else 0.0 +# # indicator_size = 30 +# # margin = 10 +# # status_colors = { +# # "red": (0, 0, 255), +# # "yellow": (0, 255, 255), +# # "green": (0, 255, 0), +# # "unknown": (200, 200, 200) +# # } +# # draw_color = status_colors.get(color, (200, 200, 200)) +# # # Draw circle indicator +# # cv2.circle( +# # annotated_frame, +# # (annotated_frame.shape[1] - margin - indicator_size, margin + indicator_size), +# # indicator_size, +# # draw_color, +# # -1 +# # ) +# # # Add color text +# # cv2.putText( +# # annotated_frame, +# # f"{color.upper()} ({confidence:.2f})", +# # (annotated_frame.shape[1] - margin - indicator_size - 120, margin + indicator_size + 10), +# # cv2.FONT_HERSHEY_SIMPLEX, +# # 0.7, +# # (0, 0, 0), +# # 2 +# # ) + +# # Signal for raw data subscribers (now without violations) +# # Emit with correct number of arguments +# try: +# self.raw_frame_ready.emit(frame.copy(), detections, fps_smoothed) +# print(f"✅ raw_frame_ready signal emitted with {len(detections)} detections, fps={fps_smoothed:.1f}") +# except Exception as e: +# print(f"❌ Error emitting raw_frame_ready: {e}") +# import traceback +# traceback.print_exc() + +# # Emit the NumPy frame signal for direct display - annotated version for visual feedback +# print(f"🔴 Emitting frame_np_ready signal with annotated_frame shape: {annotated_frame.shape}") +# try: +# # Make sure the frame can be safely transmitted over Qt's signal system +# # Create a contiguous copy of the array +# frame_copy = 
np.ascontiguousarray(annotated_frame) +# print(f"🔍 Debug - Before emission: frame_copy type={type(frame_copy)}, shape={frame_copy.shape}, is_contiguous={frame_copy.flags['C_CONTIGUOUS']}") +# self.frame_np_ready.emit(frame_copy) +# print("✅ frame_np_ready signal emitted successfully") +# except Exception as e: +# print(f"❌ Error emitting frame: {e}") +# import traceback +# traceback.print_exc() + +# # Emit QPixmap for video detection tab (frame_ready) +# try: +# from PySide6.QtGui import QImage, QPixmap +# rgb_frame = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB) +# h, w, ch = rgb_frame.shape +# bytes_per_line = ch * w +# qimg = QImage(rgb_frame.data, w, h, bytes_per_line, QImage.Format_RGB888) +# pixmap = QPixmap.fromImage(qimg) +# metrics = { +# 'FPS': fps_smoothed, +# 'Detection (ms)': detection_time +# } +# self.frame_ready.emit(pixmap, detections, metrics) +# print("✅ frame_ready signal emitted for video detection tab") +# except Exception as e: +# print(f"❌ Error emitting frame_ready: {e}") +# import traceback +# traceback.print_exc() + +# # Emit stats signal for performance monitoring +# # Count traffic lights for UI (confidence >= 0.5) +# traffic_light_count = 0 +# for det in detections: +# if is_traffic_light(det.get('class_name')): +# tl_conf = 0.0 +# if 'traffic_light_color' in det and isinstance(det['traffic_light_color'], dict): +# tl_conf = det['traffic_light_color'].get('confidence', 0.0) +# if tl_conf >= 0.5: +# traffic_light_count += 1 +# # Count cars for UI (confidence >= 0.5) +# car_count = 0 +# for det in detections: +# if det.get('class_name') == 'car' and det.get('confidence', 0.0) >= 0.5: +# car_count += 1 +# # Get model information from model manager +# model_info = {} +# if self.model_manager and hasattr(self.model_manager, 'get_current_model_info'): +# model_info = self.model_manager.get_current_model_info() +# print(f"🔧 DEBUG: Model info from manager: {model_info}") + +# stats = { +# 'fps': fps_smoothed, +# 'detection_fps': fps_smoothed, # Numeric value for analytics +# 'detection_time': detection_time, +# 'detection_time_ms': detection_time, # Numeric value for analytics +# 'traffic_light_color': self.latest_traffic_light, +# 'tlights': traffic_light_count, # Only confident traffic lights +# 'cars': car_count, # Only confident cars +# 'model_path': model_info.get('model_path', ''), # Add model path for UI +# 'model_name': model_info.get('model_name', 'Unknown') # Add model name for UI +# } +# print(f"🔧 DEBUG: Stats with model info: model_name={stats.get('model_name')}, model_path={stats.get('model_path')}") + +# # Print detailed stats for debugging +# tl_color = "unknown" +# if isinstance(self.latest_traffic_light, dict): +# tl_color = self.latest_traffic_light.get('color', 'unknown') +# elif isinstance(self.latest_traffic_light, str): +# tl_color = self.latest_traffic_light + +# print(f"🟢 Stats Updated: FPS={fps_smoothed:.2f}, Inference={detection_time:.2f}ms, Traffic Light={tl_color}") + +# # Emit stats signal +# self.stats_ready.emit(stats) + +# # Emit performance stats for performance graphs +# perf_stats = { +# 'frame_idx': self.frame_count, +# 'fps': fps_smoothed, +# 'inference_time': detection_time, +# 'device': getattr(self, 'current_device', 'CPU'), +# 'resolution': getattr(self, 'current_resolution', f'{frame.shape[1]}x{frame.shape[0]}' if frame is not None else '-'), +# 'model_name': model_info.get('model_name', 'Unknown'), # Add model name for performance graphs +# 'is_spike': False, # TODO: Add spike logic if needed +# 'is_res_change': False, # 
TODO: Add res change logic if needed +# 'cpu_spike': False, # TODO: Add cpu spike logic if needed +# } +# print(f"[PERF] Emitting performance_stats_ready: {perf_stats}") +# self.performance_stats_ready.emit(perf_stats) + +# # --- Update last analysis data for VLM --- +# self._last_analysis_data = { +# 'detections': detections, +# 'tracked_vehicles': tracked_vehicles if 'tracked_vehicles' in locals() else [], +# 'traffic_light': self.latest_traffic_light, +# 'crosswalk_bbox': crosswalk_bbox if 'crosswalk_bbox' in locals() else None, +# 'violation_line_y': violation_line_y if 'violation_line_y' in locals() else None, +# 'crosswalk_detected': crosswalk_bbox is not None if 'crosswalk_bbox' in locals() else False, +# 'traffic_light_position': traffic_light_position if has_traffic_lights else None, +# 'frame_shape': frame.shape if frame is not None else None, +# 'timestamp': time.time() +# } + +# # --- Ensure analytics update every frame --- +# # Always add traffic_light_color to each detection dict for analytics +# for det in detections: +# if is_traffic_light(det.get('class_name')): +# if 'traffic_light_color' not in det: +# det['traffic_light_color'] = self.latest_traffic_light if hasattr(self, 'latest_traffic_light') else {'color': 'unknown', 'confidence': 0.0} +# if hasattr(self, 'analytics_controller') and self.analytics_controller is not None: +# try: +# self.analytics_controller.process_frame_data(frame, detections, stats) +# print("[DEBUG] Called analytics_controller.process_frame_data for analytics update") +# except Exception as e: +# print(f"[ERROR] Could not update analytics: {e}") + +# # Control processing rate for file sources +# if isinstance(self.source, str) and self.source_fps > 0: +# frame_duration = time.time() - process_start +# if frame_duration < frame_time: +# time.sleep(frame_time - frame_duration) + +# cap.release() +# except Exception as e: +# print(f"Video processing error: {e}") +# import traceback +# traceback.print_exc() +# finally: +# self._running = False +# def _process_frame(self): +# """Process current frame for display with improved error handling""" +# try: +# self.mutex.lock() +# if self.current_frame is None: +# print("⚠️ No frame available to process") +# self.mutex.unlock() + +# # Check if we're running - if not, this is expected behavior +# if not self._running: +# return + +# # If we are running but have no frame, create a blank frame with error message +# h, w = 480, 640 # Default size +# blank_frame = np.zeros((h, w, 3), dtype=np.uint8) +# cv2.putText(blank_frame, "No video input", (w//2-100, h//2), +# cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) + +# # Emit this blank frame +# try: +# self.frame_np_ready.emit(blank_frame) +# except Exception as e: +# print(f"Error emitting blank frame: {e}") + +# return + +# # Make a copy of the data we need +# try: +# frame = self.current_frame.copy() +# detections = self.current_detections.copy() if self.current_detections else [] +# metrics = self.performance_metrics.copy() +# except Exception as e: +# print(f"Error copying frame data: {e}") +# self.mutex.unlock() +# return + +# self.mutex.unlock() +# except Exception as e: +# print(f"Critical error in _process_frame initialization: {e}") +# import traceback +# traceback.print_exc() +# try: +# self.mutex.unlock() +# except: +# pass +# return + +# try: +# # --- Simplified frame processing for display --- +# # The violation logic is now handled in the main _run thread +# # This method just handles basic display overlays + +# annotated_frame = frame.copy() + +# # 
Add performance overlays and debug markers - COMMENTED OUT for clean video display +# # annotated_frame = draw_performance_overlay(annotated_frame, metrics) +# # cv2.circle(annotated_frame, (20, 20), 10, (255, 255, 0), -1) + +# # Convert BGR to RGB before display (for PyQt/PySide) +# frame_rgb = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB) +# # Display the RGB frame in the UI (replace with your display logic) +# # Example: self.image_label.setPixmap(QPixmap.fromImage(QImage(frame_rgb.data, w, h, QImage.Format_RGB888))) +# except Exception as e: +# print(f"Error in _process_frame: {e}") +# import traceback +# traceback.print_exc() + +# def _cleanup_old_vehicle_data(self, current_track_ids): +# """ +# Clean up tracking data for vehicles that are no longer being tracked. +# This prevents memory leaks and improves performance. + +# Args: +# current_track_ids: Set of currently active track IDs +# """ +# # Find IDs that are no longer active +# old_ids = set(self.vehicle_history.keys()) - set(current_track_ids) + +# if old_ids: +# print(f"[CLEANUP] Removing tracking data for {len(old_ids)} old vehicle IDs: {sorted(old_ids)}") +# for old_id in old_ids: +# # Remove from history and status tracking +# if old_id in self.vehicle_history: +# del self.vehicle_history[old_id] +# if old_id in self.vehicle_statuses: +# del self.vehicle_statuses[old_id] +# print(f"[CLEANUP] Now tracking {len(self.vehicle_history)} active vehicles") + +# # --- Removed unused internal violation line detection methods and RedLightViolationSystem usage --- +# def play(self): +# """Alias for start(), for UI compatibility.""" +# self.start() + +# def pause(self): +# """Pause video processing.""" +# print("[VideoController] Pause requested") +# self.mutex.lock() +# self._paused = True +# self.mutex.unlock() + +# # Emit the last captured frame for VLM analysis if available +# if hasattr(self, '_last_frame') and self._last_frame is not None: +# print("[VideoController] Emitting last frame for VLM analysis during pause") +# try: +# # Emit the last frame with empty detections for VLM +# self.raw_frame_ready.emit(self._last_frame.copy(), [], 0.0) +# print("✅ Last frame emitted for VLM analysis") +# except Exception as e: +# print(f"❌ Error emitting last frame: {e}") +# else: +# print("[VideoController] No last frame available for VLM analysis") + +# # Emit pause state signal +# self.pause_state_changed.emit(True) +# print("[VideoController] Pause state signal emitted: True") + +# def resume(self): +# """Resume video processing from pause.""" +# print("[VideoController] Resume requested") +# self.mutex.lock() +# self._paused = False +# self.pause_condition.wakeAll() # Wake up any waiting threads +# self.mutex.unlock() +# # Emit pause state signal +# self.pause_state_changed.emit(False) +# print("[VideoController] Pause state signal emitted: False") + +# def get_current_analysis_data(self): +# """Get current analysis data for VLM insights.""" +# return self._last_analysis_data.copy() if self._last_analysis_data else {} + +# def _on_scene_object_detected(self, obj_data: dict): +# """Handle scene object detection signal""" +# try: +# # Forward scene object detection to analytics +# print(f"[SCENE] Object detected: {obj_data.get('category', 'unknown')} " +# f"(confidence: {obj_data.get('confidence', 0):.2f})") +# except Exception as e: +# print(f"Error handling scene object detection: {e}") + +# def _on_scene_analytics_updated(self, analytics_data: dict): +# """Handle scene analytics update signal""" +# try: +# # Forward scene analytics 
to performance stats +# camera_id = analytics_data.get('camera_id', 'unknown') +# fps = analytics_data.get('fps', 0) +# processing_time = analytics_data.get('processing_time_ms', 0) +# object_count = analytics_data.get('object_count', 0) + +# # Update performance metrics with scene analytics +# self.performance_metrics['Scene_FPS'] = fps +# self.performance_metrics['Scene_Objects'] = object_count +# self.performance_metrics['Scene_Processing_ms'] = processing_time + +# print(f"[SCENE] Analytics updated - FPS: {fps:.1f}, Objects: {object_count}, " +# f"Processing: {processing_time:.1f}ms") + +# except Exception as e: +# print(f"Error handling scene analytics update: {e}") + +# def _on_roi_event_detected(self, event_data: dict): +# """Handle ROI event detection signal""" +# try: +# event_type = event_data.get('type', 'unknown') +# roi_id = event_data.get('roi_id', 'unknown') +# object_category = event_data.get('object_category', 'unknown') + +# print(f"[SCENE] ROI Event: {event_type} in {roi_id} - {object_category}") + +# # Emit as violation if it's a safety-related event +# if 'safety' in event_type.lower() or 'violation' in event_type.lower(): +# violation_data = { +# 'type': 'roi_violation', +# 'roi_id': roi_id, +# 'object_category': object_category, +# 'timestamp': event_data.get('timestamp'), +# 'confidence': event_data.get('confidence', 1.0), +# 'source': 'scene_analytics' +# } +# self.violation_detected.emit(violation_data) + +# except Exception as e: +# print(f"Error handling ROI event: {e}") + +# @Slot(str) +# def on_model_switched(self, device): +# """Handle device switch from config panel.""" +# try: +# print(f"🔄 Video Controller: Device switch requested to {device}") + +# # Update our device reference +# self.current_device = device +# print(f"✅ Video Controller: current_device updated to {device}") + +# # If we have a model manager, the device switch should already be done +# # Just log the current state for verification +# if self.model_manager and hasattr(self.model_manager, 'detector'): +# if hasattr(self.model_manager.detector, 'device'): +# current_device = self.model_manager.detector.device +# print(f"✅ Video Controller: Model manager detector now using device: {current_device}") +# else: +# print(f"✅ Video Controller: Model manager detector updated to {device}") + +# print(f"✅ Video Controller: Device switch to {device} completed") + +# except Exception as e: +# print(f"❌ Video Controller: Error during device switch: {e}") +from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer, Slot from PySide6.QtGui import QImage, QPixmap import cv2 import time @@ -87,6 +2041,7 @@ class VideoController(QObject): auto_select_model_device = Signal() # Signal for UI to request auto model/device selection performance_stats_ready = Signal(dict) # NEW: Signal for performance tab (fps, inference, device, res) violations_batch_ready = Signal(list) # NEW: Signal to emit a batch of violations + pause_state_changed = Signal(bool) # Signal emitted when pause state changes (True=paused, False=playing) def __init__(self, model_manager=None): """ @@ -99,11 +2054,15 @@ class VideoController(QObject): print("Loaded advanced VideoController from video_controller_new.py") # DEBUG: Confirm correct controller self._running = False + self._paused = False # Add pause state + self._last_frame = None # Store last frame for VLM analysis during pause + self._last_analysis_data = {} # Store last analysis data for VLM self.source = None self.source_type = None self.source_fps = 0 
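# A condensed sketch of the pause/resume pattern that the hunks below wire into
# the worker loop, using the same QMutex/QWaitCondition attributes the patch
# adds; the class here is illustrative, not part of the repository.
from PySide6.QtCore import QMutex, QWaitCondition

class PausableWorker:
    def __init__(self):
        self._running = True
        self._paused = False
        self.mutex = QMutex()
        self.pause_condition = QWaitCondition()

    def wait_if_paused(self):
        """Called once per frame inside the processing loop."""
        self.mutex.lock()
        if self._paused:
            self.pause_condition.wait(self.mutex)  # releases the mutex while blocked
        self.mutex.unlock()
        return self._running  # loop should exit if stop() ran while paused

    def pause(self):
        self.mutex.lock()
        self._paused = True
        self.mutex.unlock()

    def resume(self):
        self.mutex.lock()
        self._paused = False
        self.pause_condition.wakeAll()  # wake the loop blocked in wait_if_paused()
        self.mutex.unlock()

    def stop(self):
        self._running = False
        self.resume()  # ensure a paused loop wakes up and can exit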
self.performance_metrics = {} self.mutex = QMutex() + self.pause_condition = QWaitCondition() # Add wait condition for pause # Performance tracking self.processing_times = deque(maxlen=100) # Store last 100 processing times @@ -116,6 +2075,13 @@ class VideoController(QObject): self.inference_model = None self.tracker = None + # Initialize device tracking + if self.model_manager and hasattr(self.model_manager, 'config'): + self.current_device = self.model_manager.config.get("detection", {}).get("device", "CPU") + else: + self.current_device = "CPU" + print(f"🔧 Video Controller: Initialized with device: {self.current_device}") + self.current_frame = None self.current_detections = [] @@ -187,6 +2153,18 @@ class VideoController(QObject): # Add red light violation system # self.red_light_violation_system = RedLightViolationSystem() + # Initialize scene analytics adapter + try: + from utils.scene_analytics import SceneAnalyticsAdapter + self.scene_analytics = SceneAnalyticsAdapter(camera_id="desktop_main") + self.scene_analytics.object_detected.connect(self._on_scene_object_detected) + self.scene_analytics.scene_analytics_updated.connect(self._on_scene_analytics_updated) + self.scene_analytics.roi_event_detected.connect(self._on_roi_event_detected) + print("✅ Scene analytics adapter initialized") + except Exception as e: + self.scene_analytics = None + print(f"❌ Could not initialize scene analytics: {e}") + def set_source(self, source): """ Set video source (file path, camera index, or URL) @@ -389,6 +2367,13 @@ class VideoController(QObject): if self._running: print("DEBUG: Stopping video processing") self._running = False + + # If paused, wake up the thread so it can exit + self.mutex.lock() + self._paused = False + self.pause_condition.wakeAll() + self.mutex.unlock() + self.render_timer.stop() # Properly terminate the thread if self.thread.isRunning(): @@ -569,6 +2554,18 @@ class VideoController(QObject): # --- End Violation Rule Functions --- while self._running and cap.isOpened(): + # Handle pause state + self.mutex.lock() + if self._paused: + print("[VideoController] Video paused, waiting...") + self.pause_condition.wait(self.mutex) + print("[VideoController] Video resumed") + self.mutex.unlock() + + # Exit if we're no longer running (could have stopped while paused) + if not self._running: + break + try: ret, frame = cap.read() # Add critical frame debugging @@ -588,6 +2585,11 @@ class VideoController(QObject): # Reset the error counter if we successfully got a frame frame_error_count = 0 + + # Store the last frame for VLM analysis during pause + self._last_frame = frame.copy() + print(f"🟢 Last frame stored for VLM: {frame.shape}") + except Exception as e: print(f"❌ Critical error reading frame: {e}") frame_error_count += 1 @@ -685,7 +2687,17 @@ class VideoController(QObject): self.current_frame = frame.copy() self.current_detections = detections self.mutex.unlock() - # Process frame with annotations before sending to UI + + # --- SCENE ANALYTICS PROCESSING --- + # Process detections through scene analytics if available + if self.scene_analytics: + try: + scene_analytics_data = self.scene_analytics.process_frame(frame, detections) + # Scene analytics automatically emit signals that we handle above + except Exception as e: + print(f"Error in scene analytics processing: {e}") + + # Process frame with annotations before sending to UI annotated_frame = frame.copy() # --- VIOLATION DETECTION LOGIC (Run BEFORE drawing boxes) --- @@ -1102,7 +3114,9 @@ class VideoController(QObject): # --- 1. 
Pedestrian right-of-way violation --- if crosswalk_poly and latest_light_color == 'green' and pedestrian_tracks: for track in tracked_vehicles: - if point_in_polygon(track['center'], crosswalk_poly): + # Only consider moving vehicles - stopped vehicles aren't likely to be violating + if track.get('is_moving', False) and point_in_polygon(track['center'], crosswalk_poly): + # Check for pedestrians in the crosswalk for ped in pedestrian_tracks: if point_in_polygon(ped['center'], crosswalk_poly): # Vehicle is blocking crosswalk during green with pedestrian present @@ -1115,16 +3129,21 @@ class VideoController(QObject): 'timestamp': datetime.now(), 'details': { 'pedestrian_bbox': ped['bbox'], - 'crosswalk_bbox': crosswalk_bbox + 'crosswalk_bbox': crosswalk_bbox, + 'is_moving': track.get('is_moving', False), + 'traffic_light': latest_light_color } }) - print(f"[VIOLATION] Pedestrian right-of-way violation: Vehicle ID={track['id']} blocking crosswalk during green") + print(f"[VIOLATION] Pedestrian right-of-way violation: Vehicle ID={track['id']} blocking crosswalk during green light with pedestrian present") # --- 2. Improper stopping over crosswalk at red --- if crosswalk_poly and latest_light_color == 'red': for track in tracked_vehicles: - if point_in_polygon(track['center'], crosswalk_poly): - # Calculate overlap ratio + # Check if vehicle is not moving (to confirm it's stopped) + is_stopped = not track.get('is_moving', True) + + if is_stopped and point_in_polygon(track['center'], crosswalk_poly): + # Calculate overlap ratio between vehicle and crosswalk vx1, vy1, vx2, vy2 = track['bbox'] cx, cy, cw, ch = crosswalk_poly overlap_x1 = max(vx1, cx) @@ -1134,7 +3153,8 @@ class VideoController(QObject): overlap_area = max(0, overlap_x2 - overlap_x1) * max(0, overlap_y2 - overlap_y1) vehicle_area = (vx2 - vx1) * (vy2 - vy1) overlap_ratio = overlap_area / max(vehicle_area, 1) - # Check if vehicle is stopped (low speed) + + # Double-verify that vehicle is stopped by checking explicit speed speed = 0.0 hist = vehicle_position_history.get(track['id'], []) if len(hist) >= 2: @@ -1142,7 +3162,9 @@ class VideoController(QObject): dist = ((c2[0]-c1[0])**2 + (c2[1]-c1[1])**2)**0.5 dt = max(t2-t1, 1e-3) speed = dist / dt - if overlap_ratio > 0.3 and speed < 0.5: + + # Vehicle must have significant overlap with crosswalk (>25%) and be stopped + if overlap_ratio > 0.25 and speed < 0.8: violations.append({ 'track_id': track['id'], 'id': track['id'], @@ -1153,37 +3175,64 @@ class VideoController(QObject): 'details': { 'overlap_ratio': overlap_ratio, 'speed': speed, - 'crosswalk_bbox': crosswalk_bbox + 'crosswalk_bbox': crosswalk_bbox, + 'traffic_light': latest_light_color, + 'is_moving_flag': track.get('is_moving', None) } }) - print(f"[VIOLATION] Improper stop on crosswalk: Vehicle ID={track['id']} overlap={overlap_ratio:.2f} speed={speed:.2f}") + print(f"[VIOLATION] Improper stop on crosswalk: Vehicle ID={track['id']} stopped on crosswalk during red light (overlap={overlap_ratio:.2f}, speed={speed:.2f})") # --- 3. 
Accelerating through yellow/amber light --- if stopline_poly and latest_light_color == 'yellow' and amber_start_time: - speed_limit_px_per_sec = 8.0 # Example threshold, tune as needed + # Calculate time since light turned yellow + current_time = time.time() + time_since_yellow = current_time - amber_start_time + + # Speed threshold (in pixels per second) - can be adjusted based on testing + speed_limit_px_per_sec = 8.0 + + # Check each vehicle approaching the intersection for track in tracked_vehicles: - if point_in_polygon(track['center'], stopline_poly): - # Calculate speed delta - hist = vehicle_position_history.get(track['id'], []) - if len(hist) >= 3: - (c1, t1), (c2, t2), (c3, t3) = hist[-3], hist[-2], hist[-1] - v1 = ((c2[0]-c1[0])**2 + (c2[1]-c1[1])**2)**0.5 / max(t2-t1, 1e-3) - v2 = ((c3[0]-c2[0])**2 + (c3[1]-c2[1])**2)**0.5 / max(t3-t2, 1e-3) - if v2 > v1 * 1.2 and v2 > speed_limit_px_per_sec: - violations.append({ - 'track_id': track['id'], - 'id': track['id'], - 'bbox': [int(track['bbox'][0]), int(track['bbox'][1]), int(track['bbox'][2]), int(track['bbox'][3])], - 'violation': 'amber_acceleration', - 'violation_type': 'amber_acceleration', - 'timestamp': datetime.now(), - 'details': { - 'speed_before': v1, - 'speed_after': v2, - 'crosswalk_bbox': crosswalk_bbox - } - }) - print(f"[VIOLATION] Amber acceleration: Vehicle ID={track['id']} v1={v1:.2f} v2={v2:.2f}") + # Check if vehicle is near the stop line/intersection + if point_in_polygon(track['center'], stopline_poly) or ( + track['center'][1] < stopline_poly[1] + stopline_poly[3] + 50 and + track['center'][1] > stopline_poly[1] - 50 + ): + # If the vehicle is moving (confirmed via tracker) + if track.get('is_moving', False): + # Calculate acceleration by looking at recent speed changes + hist = vehicle_position_history.get(track['id'], []) + if len(hist) >= 3: + # Calculate speeds at different time points + (c1, t1), (c2, t2), (c3, t3) = hist[-3], hist[-2], hist[-1] + + # Speed at earlier point + v1 = ((c2[0]-c1[0])**2 + (c2[1]-c1[1])**2)**0.5 / max(t2-t1, 1e-3) + + # Speed at later point + v2 = ((c3[0]-c2[0])**2 + (c3[1]-c2[1])**2)**0.5 / max(t3-t2, 1e-3) + + # Acceleration violation if: + # 1. Speed increases significantly (>20%) + # 2. Final speed exceeds threshold + # 3. 
Yellow light is less than 3 seconds old (typical acceleration window) + if v2 > v1 * 1.2 and v2 > speed_limit_px_per_sec and time_since_yellow < 3.0: + violations.append({ + 'track_id': track['id'], + 'id': track['id'], + 'bbox': [int(track['bbox'][0]), int(track['bbox'][1]), int(track['bbox'][2]), int(track['bbox'][3])], + 'violation': 'amber_acceleration', + 'violation_type': 'amber_acceleration', + 'timestamp': datetime.now(), + 'details': { + 'speed_before': v1, + 'speed_after': v2, + 'acceleration': (v2-v1)/max(t3-t2, 1e-3), + 'time_since_yellow': time_since_yellow, + 'traffic_light': latest_light_color + } + }) + print(f"[VIOLATION] Amber light acceleration: Vehicle ID={track['id']} accelerated from {v1:.2f} to {v2:.2f} px/sec {time_since_yellow:.1f}s after yellow light") # Emit progress signal after processing each frame if hasattr(self, 'progress_ready'): @@ -1551,6 +3600,19 @@ class VideoController(QObject): print(f"[PERF] Emitting performance_stats_ready: {perf_stats}") self.performance_stats_ready.emit(perf_stats) + # --- Update last analysis data for VLM --- + self._last_analysis_data = { + 'detections': detections, + 'tracked_vehicles': tracked_vehicles if 'tracked_vehicles' in locals() else [], + 'traffic_light': self.latest_traffic_light, + 'crosswalk_bbox': crosswalk_bbox if 'crosswalk_bbox' in locals() else None, + 'violation_line_y': violation_line_y if 'violation_line_y' in locals() else None, + 'crosswalk_detected': crosswalk_bbox is not None if 'crosswalk_bbox' in locals() else False, + 'traffic_light_position': traffic_light_position if has_traffic_lights else None, + 'frame_shape': frame.shape if frame is not None else None, + 'timestamp': time.time() + } + # --- Ensure analytics update every frame --- # Always add traffic_light_color to each detection dict for analytics for det in detections: @@ -1669,5 +3731,118 @@ class VideoController(QObject): def play(self): """Alias for start(), for UI compatibility.""" self.start() + + def pause(self): + """Pause video processing.""" + print("[VideoController] Pause requested") + self.mutex.lock() + self._paused = True + self.mutex.unlock() + + # Emit the last captured frame for VLM analysis if available + if hasattr(self, '_last_frame') and self._last_frame is not None: + print("[VideoController] Emitting last frame for VLM analysis during pause") + try: + # Emit the last frame with empty detections for VLM + self.raw_frame_ready.emit(self._last_frame.copy(), [], 0.0) + print("✅ Last frame emitted for VLM analysis") + except Exception as e: + print(f"❌ Error emitting last frame: {e}") + else: + print("[VideoController] No last frame available for VLM analysis") + + # Emit pause state signal + self.pause_state_changed.emit(True) + print("[VideoController] Pause state signal emitted: True") + + def resume(self): + """Resume video processing from pause.""" + print("[VideoController] Resume requested") + self.mutex.lock() + self._paused = False + self.pause_condition.wakeAll() # Wake up any waiting threads + self.mutex.unlock() + # Emit pause state signal + self.pause_state_changed.emit(False) + print("[VideoController] Pause state signal emitted: False") + def get_current_analysis_data(self): + """Get current analysis data for VLM insights.""" + return self._last_analysis_data.copy() if self._last_analysis_data else {} + def _on_scene_object_detected(self, obj_data: dict): + """Handle scene object detection signal""" + try: + # Forward scene object detection to analytics + print(f"[SCENE] Object detected: 
{obj_data.get('category', 'unknown')} " + f"(confidence: {obj_data.get('confidence', 0):.2f})") + except Exception as e: + print(f"Error handling scene object detection: {e}") + + def _on_scene_analytics_updated(self, analytics_data: dict): + """Handle scene analytics update signal""" + try: + # Forward scene analytics to performance stats + camera_id = analytics_data.get('camera_id', 'unknown') + fps = analytics_data.get('fps', 0) + processing_time = analytics_data.get('processing_time_ms', 0) + object_count = analytics_data.get('object_count', 0) + + # Update performance metrics with scene analytics + self.performance_metrics['Scene_FPS'] = fps + self.performance_metrics['Scene_Objects'] = object_count + self.performance_metrics['Scene_Processing_ms'] = processing_time + + print(f"[SCENE] Analytics updated - FPS: {fps:.1f}, Objects: {object_count}, " + f"Processing: {processing_time:.1f}ms") + + except Exception as e: + print(f"Error handling scene analytics update: {e}") + + def _on_roi_event_detected(self, event_data: dict): + """Handle ROI event detection signal""" + try: + event_type = event_data.get('type', 'unknown') + roi_id = event_data.get('roi_id', 'unknown') + object_category = event_data.get('object_category', 'unknown') + + print(f"[SCENE] ROI Event: {event_type} in {roi_id} - {object_category}") + + # Emit as violation if it's a safety-related event + if 'safety' in event_type.lower() or 'violation' in event_type.lower(): + violation_data = { + 'type': 'roi_violation', + 'roi_id': roi_id, + 'object_category': object_category, + 'timestamp': event_data.get('timestamp'), + 'confidence': event_data.get('confidence', 1.0), + 'source': 'scene_analytics' + } + self.violation_detected.emit(violation_data) + + except Exception as e: + print(f"Error handling ROI event: {e}") + + @Slot(str) + def on_model_switched(self, device): + """Handle device switch from config panel.""" + try: + print(f"🔄 Video Controller: Device switch requested to {device}") + + # Update our device reference + self.current_device = device + print(f"✅ Video Controller: current_device updated to {device}") + + # If we have a model manager, the device switch should already be done + # Just log the current state for verification + if self.model_manager and hasattr(self.model_manager, 'detector'): + if hasattr(self.model_manager.detector, 'device'): + current_device = self.model_manager.detector.device + print(f"✅ Video Controller: Model manager detector now using device: {current_device}") + else: + print(f"✅ Video Controller: Model manager detector updated to {device}") + + print(f"✅ Video Controller: Device switch to {device} completed") + + except Exception as e: + print(f"❌ Video Controller: Error during device switch: {e}") diff --git a/qt_app_pyside1/finale/views/settings_view.py b/qt_app_pyside1/finale/views/settings_view.py index a49ad4b..336386d 100644 --- a/qt_app_pyside1/finale/views/settings_view.py +++ b/qt_app_pyside1/finale/views/settings_view.py @@ -10,7 +10,7 @@ from PySide6.QtWidgets import ( QSlider, QTextEdit, QFileDialog, QMessageBox, QProgressBar, QFormLayout, QButtonGroup, QRadioButton ) -from PySide6.QtCore import Qt, Signal, Slot, QTimer, QSettings, QThread, pyqtSignal +from PySide6.QtCore import Qt, Signal, Slot, QTimer, QSettings, QThread from PySide6.QtGui import QFont, QPixmap import os diff --git a/qt_app_pyside1/requirements.txt b/qt_app_pyside1/requirements.txt index 97d37593247d8d86ab6dbbc05173481f94e60abc..e8704d60644b74bff294450d83d34924e4e04a98 100644 GIT binary patch delta 931 
zcmaJ<%Syvg6r5t+h^}0?5(_T1)!2s!R!{^H>_QZGB59kZr8TK-iq=hVC$5zKh~Rhm z3*yX4B8VX2=048cGc)Jhuf_M}*UPt;XMY_91eo9!Lrl3IqQ`vz4{h!`2r)8*<_=s& zN7y2MfQT!X^OUO|S{M`8B3_wXJ8SZMz!72kv6MTUTRlJ>6_hx3_;;|0eN?&H)|g%A z*XDIzo;~Hg6=5BIY1FU|svn>Ew_Lo01xWc7&d^AC15*@|aX|G_J>q;vtg<6lG(v3e9S<-iiENRFx7Hep>R^7#qnTi05|-azL~Lqzo7 zH~&36rqV>Dp}ID3j!S+8Y;c@1kIYW+fMXn?5Zmzy9l4fXWh@w-MEQ0uMeb-Ktkbui&PfJNE1%0P>XIa=*6YeC@qkjtHU1CWZwUOg30|ppIL}| L(o496)lAekkG7bg delta 114 zcmdlW{Eufsh^YdD217nW9zza8C4&M(B11Am2}3E6&H?gL7*ZJufH)7#$^*(}0(r#@ zniCfaPfRP@?7?Wm%x}wJ%V5Hw$6&@_4kQgHFJzHtH3YH@CLd)n+a diff --git a/qt_app_pyside1/ui/__pycache__/__init__.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/__init__.cpython-311.pyc index d739730bf0d57a224c7b417871c0b286c87cfbb9..a7d11795c663bee68cfe7788c0b56d3c86408e36 100644 GIT binary patch delta 73 zcmbQwxQCH@IWI340}zC$u9(PeYUJ;16%$&VT2vg9l3JFWlV6$=SwIWI340}#C2o-vWzRMg1DD#j(hJTE6dF{L=hJ0r0qG1q8fkrx0PoDK>A diff --git a/qt_app_pyside1/ui/__pycache__/analytics_tab.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/analytics_tab.cpython-311.pyc index 927afce7e59c4c58594a33689fe3038fe0d21061..b53f5ac82c9531f9a75df96590464787f2e72fd6 100644 GIT binary patch literal 27416 zcmeHwYj7Lax!B^p02d@kg5nb*_$K%gC5p62ijw&dE!h^Wm*tq2#0zmj3K9s=3y=~K zP~+sfjqIuo?dw#uB9GwI_N8k(;k`F$ncHNlc$(C6r%85^ZHc?BW;C~Td-abVB{ROB z+iAb=EU>`hK`DuErk%N~<%hH9eCO7b(Vtqa77DJv_}dRJ?EeZy{daswUWP>A z!K)CsL9rC8@lkV{NzI&gQah)c)XnK9^>c@nzWHLBcz$K(u!I7B;_*S zM^u+tG97=;%g%a2Qo-@lBY`VVyO#nB_;5J&U_Ab``@F{ok;fK-7e*FBp@3hq96#kg zpUBE!*yl+++0vhmggkRtn#aw0xYJ(GWtUdc9uG4<3_7D5rzNL_NIw5 zQ?mEvG<`D7i^(;>N0t@V!F;DPPC@_EKc<2li z?DlugcwtaF=V7*=c`-EQo}ZtZUkZ9z&+an|-ZRR8O~D{_%`b6fP;l6hYrMOX&13*2 z_#}YOU!v~P)>ll|O`CK@jII#qDv_?@>8g88E!XrST_3M&z~7zm+B*ELQJ!h$o6M`s zD}~n!uaSQbpfpM~EpRBvw0H-gdQwCv^|W8sL^Qdl2CI?(y$3`7fimnG3h%F(Ma;f>m;G9^G`?Ysir*k2R`Omvs?co;I7*n9vs8#iC?MQCl*U{tG=>oP5sA9coIZP#FY# zSs&3y(?%*E9@IdeXXG>HBEfvL5;akK>V2tDBx}yfyKuPf2#`Uq+vg2?oS_RIC!s&i zXOAemK=3|JrtlSuGwn+3+0L)3ViDoId-~EW7g+GKozu9n4mcb3jP@Mr?H$h&-e9w7GrqIqKxF-cY(@2$_M*~p*0O$U5*k_bHSP_|L$Wg-?*ylTg z(B1*(p6>Z8>i1$!f^gY``@n$ne89(Mqik1orHC2#oY%KB;A}Y&I3EZFS~{G=9Q1UD zGwAjQJ7EX#&SaN7?`B!Ae|EsRyEj+eQr$b?e9`Cid)(YC=VrYgf2j4!?t$(O=MsTe z_6+QX@E(j&bqm%GuORF<1Kr8NfjZ&yZf8T!-o5?3hg{heJ0IYHgX+YWEd&Rg`*PNf z;6UByuQ-DNpO;ORr!Fovxb9-0)T;WWxVnLXdAHw_V*urvJoGKt%15g7qs`b%f znb4z*U?(DzGPlPjE9G~Z=BhQ;+ns|t0DIaA3~A^B40Lwe^N+>=uw6hKfOq|>YGPrJ zr>`Cumi~QXBia3x7?#TyydjS|b(@!72rPQIfx1V1cP^5cDt!Srovqt%yzD&Tnc+Oa z3(hg%E!}?){Ew9)gmGaUy?nMqDYA(wj>!V3yK)WA@?ttrhVS&OCA;i z0ly!plw{-N6+Y$m`G^F`p&%hypf)E%oF_DWfz(Xaint714W=4>2_;F$8}fOke6n&R zStBRqo&&j|)47#T~=f3gUFBT=*8nGypP#$N=CGKow=SZ_>^f z?R>Z7ot*;REz;dQ-A&$$g6wLwKsSkW6Hhn&#z<8+z1eZAfx<#a0c)A6$$Q2Rj29a*y=>`&08>4Flx=y6)c)IQ$_5^va z{T9T!MB2sEE_ic!Jzw7|?C2AB^r@O;$0phF)d02#XoDt+boD0P5ThHSV`5{!Ko5xY z08bAtq>+-`xc2l<=_5glW&Hqios?hxq?p6vL=)!TSqcvZz412k-CX`T4qJ)B-*Bx32qAKn_3aT9T%Y9tSE`wwq^Mqs(C~Q6f)cOP}slv?j9-l98 zIq_8R1n2Si|GrM9U^>9!c0)oC->DEL z7d(VJa}uU}B=`yz@)H0+sS3+p@m}{{)7=G z)OJ?~T!WwEFb5LX5-v%u52`CK zNu0wj-4`?{D5fttkBo&2Pbam;&QnXEZte$!1zf_g@YbA4VldyLD^lJlFDGsg-?bls zvZS~nd^`X^y@uK#ChXd74XFyhjs^VA>&Mx#HoW$tP`euzfUGUX^;5G+_r>TwSSI^$ z{UpHl>!(Mc_loph9+sVY{d5X+mq>Tr)=zzM{m6VU z?rzhUfI&~&*wR)J?$RzJH-huG@K_)SYDv)VwO;91a)oJ`OLsmm8IJ}Qk;gYDR*`>J zUR|hdhShb$c{*X8$+xzwiz%)gwy57?Wwm3?TDDzTsiLER9J~YI|FeB5`i!y=VJ*Tf zFDr#5gBBqhh9vL7>sibqFnL-vivuiS9z##Ic_W{RN6mufR_})t=8K{(B9BC_EXFR6 z5h>#hIT^-G0}-&6n%ve$HcQzKm^m3rU@xPW!}lmonR;H zhmjQn1*1Rc4WgI~D{cw2#k0;(0QW95UK(YsG*<`Zy2DTdmGUReU_JmEHCflby+ZBF z&CccuV>HWoSycWjCYF3E=9I!yLCx1rw-3}Heo%9C=E#(0YDKGl0PUA8i`57B5h 
diff --git a/qt_app_pyside1/ui/__pycache__/export_tab.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/export_tab.cpython-311.pyc
index a6b9ec559684551c6b1e64a3eb438c6b89e02c78..44ea6cd45667aea16958cf07ebb5601d67046217 100644
Binary files a/qt_app_pyside1/ui/__pycache__/export_tab.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/export_tab.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/live_multi_cam_tab.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/live_multi_cam_tab.cpython-311.pyc
index c1fde5818096f31ad8e2aaabefee174e313a3eb7..ce2322a401788f55a24f2fe4a6d56318545f9cb3 100644
Binary files a/qt_app_pyside1/ui/__pycache__/live_multi_cam_tab.cpython-311.pyc and b/qt_app_pyside1/ui/__pycache__/live_multi_cam_tab.cpython-311.pyc differ
diff --git a/qt_app_pyside1/ui/__pycache__/performance_graphs.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/performance_graphs.cpython-311.pyc
index 1ab6b0a97f5cf9fe2e9429439dc34acfbd57de4b..9885d0034b21c750bd25936acb0749cb9b3b37db 100644
GIT binary patch
literal 42801
zSG6eTs?&6TDPAkQn(ZT(c6tiM%Q-8~kG24q_o$A=H^eJS_Z_dDDcrf&0lt-`+Z2xQ z{y*vX+o8uSvkv(8OG;$@*Kc2DywB4La=$k}iUFmJ9xLFeK9ISNczKp$l*2sBjaUlQ z1T^yAd6%1D48^~fo_J&28*eK3D)ztNsP}85uF^|MJ>FDGp((WB5+<`nz$Y3!eS7^A zKyv~m+UdxxR2%5ZLdoXh;Hg0FiWBt?{*Ic0k#@u@0M=hz2fkYdSj)BXTF(6*-DB{m zhoH%UUbA2$zJ)didM*22h(f;SaRWekK3#nJ@#~)`ZP5%;aKZQubo@E;hO@^l1vL0D zUI{e#cRBQYuD++!jg4F_SNCtE8~^KdrgymdNhb5>Z&=#*+MN&&R&DSgqA!gN-3dVo z)+Ka5WjD>cILEuxyxs{p#XQ1?gOOd_ckBYwFuN#Od7PYG&`_CO6xq9|fA1o3b_pa) z#_pptyJ#y5fAQx(|9KZ>mulz7U7BJ%f93VvUqfyeg~MfkH;+{nCs@a?f&nYUUjXzJ z$H=Sz9fyN4Ffl1+aF+?-@4Fca-?%C8N3a2z2}dGVL+It>Ayp0c7Jz~t!g9EyPmsqC zgJL>67vaE&1tYx{S`<`^J1%{->>?QWdDJ}=Gmmy!Y8#_gGO*AG?|`FHfbtkRRuv0B zC*a)(ZVc*$P|Cr;E6njBYGzRF3+DkAKaK#cq1grSXwnu-;rV>S+!)p|MZihP_hOqq zd?tW9^>6aHcTntz_E;nmo{xq20j$+eYhf`C-``9v7mAtAD4OM{tCSC6)fozbx&Q(y z4IS!(N=d~K=Fea~)B(_EaRMG^F~y!WZ^sn#qU!sA z8`v7l7Xy@>SPXCrY>JEE7vV!NS>#?OSGIUmDZ2-Mi@k8~Rm{}W5<_-O+ZRjR(iR&! z_A%t_e2Y!?&C38R)=-WXn?F{Aq3=QW!|s*x6sg=MzAfT=a#L>ZlSscz`bE;8a=9Pb zKD9k?Janv>Q&qK(yq|g>_#XOJEU7y0S}@V^*}=ziQr+Pdd#c`-FeFAld-Ye9Qhm>g z11yVHm+C@UbN|xw3(sTky7%*$mvu3S& z?P|jPS))V_$>fko4rL(=PsR~)a#CtKDmNY7I4ac-$n^sfIVO{1A~}|Y;a>M9xX<3( z=>P4Br9jy)ay`&XrtuZa_Hic>$8r$W+1 zNFp;bnGwlM4iaml>#rpzfIxD6k3^2h{QjQ-hR;VuwjoWvyK_mbY%o+|Ioe` zPX?uyo{d(irB8D8L#d-?+tIY;Xj;3r{&uo8`TEAS-@Yv!KJ&Cya*WE3QIY--B>%wk z(1XCnDadg{2ITw6iN}AjQ6;tXO0K?Kh!Y9ZXZGa8hGoY8`!gTWY-^xi74Y zVU<@CCt>jIeH%fk{kY^Fgi?(H6A5$D@GEOFlsuI@B{_y<$B;;WDzc7G93NNQuUM&| zlZH0PwYIgHq~Y;<8fNeY`EE6qW3hyVfV}jieT`d>NR0>OMl@=}GKfAa@RIR&8|- zNu5Kf+WM98l-IX%G39GnxtwZjT6r~f;Lz80(LMC@b8Dt`yX5YW-5sL4W3vjz{3EaS zA4)F#)pBBaHMACb5M7OKwyvDt8Gr4+Uzt=n2>GN#?mD^2$)_g7$v5T6Ti+SWCJYtd zGnjtIl$|$JLLSmR#_k-ZiR3!FYLIHqu8yq@Bn~9UH!cB9o7nXe$$ zcGW#{e(GFXOg4zk$b+)0PjvOAYP@@lq3Tr1&2GE*Z@Kp;8#V^T11BZ-u?BlOZ(5r`_DYBdV2J4Yvlc7TfQ;T2VB#(?e=fE{mH?Nw>IyH zUE`AbyzD+Ny3eO-SnOgr7oJ_kv%=Y z;8g=OSsh9F+7lz|KY1{|dUmahJ^;s#t-q5laC^7i9b4`WbnT*Ol-&KYyI-U~;Pand z{^atL{)9z$XBVN zqN_)H1J3J-U)3f}U)wh(HXV{{Om>Zl^p|RA2eIXR1;rMm8uIEqd_JD++c3g6q+?L- z7~BNGcv5aTDY=Ga*Rbds&V>&w2`hGd^V&CWOZ^ul*UPf&Ws&~&UNta|R?*IiokQ^3 z8#iS8qaG>0kw^OpkLCy-_x^t4IDZQ6<9;UwW6VMTS0JF+0)glpw-CX+BM`W~5R9bX zlm`Ob+%yy$LAgP?)uMjuNjgO%pG zJqD75nck9}^tRbgZ?rwU6r;E4bAHlRj;*z%`kKLv6ZoPxN_0B3X&9Aup=OQOGt?$? 
zBh_eBn&Be?@oHrdac1I-kV4c@$Py}tn6$F&Zg-}PGk;{7veK9|8+ghrO8u2=5Fw2* zi>Z-$IyWS0_Ncl)OM#;QZxTYrB`s!(TE_3QR~p zfpDm0!v}_0qTcTs?lOzUpD=eJ01?Iq<}URL$3Wnn(B5atne#i7O*1ZX3r9B&vbT0T8DJBvFvXZpPwqXe;D*so$8TbvHfV+5*V zp678lXrnJE7{!kJu$VyovTF2K6$WB0#DX_Mft#?En&a;&RS;mRF9Lx!geVz8s#HPN zX4wx_xq^n0CAP$>&|;(#P%HKf5)e8iVx+*k@BGC1apnEWmCBu_w#OINFJKgshk9~B zOKROF{w?CC`iMk^Wil+1;k_~==%^`&OLq9S9qn6=_5`S&e%av{=?^~2pc)&f9-B>f z993fV=u=GKm#eC(3ags_0N0)oTS4XzYE%=dT{JYRbzgE?Z0(g=d*#+%ENb2ZYb`Cju+I_^_XWcEhS${V3D^9!Q@m5iyT>%k9lqote~xThAAbnmW{baCcS)I zHsyx-#`Ex7HVbGC$xz#j&fu@}oVP#$wDQm^ZZ1Jn=9X5cU^F={pgi0xI?7x1EpufZ zNOp}5B`BZg9tFx)C@rHWlgeF}xw81-=4D-3rl+dKBu4ZAVo3RAG zd2KmzZr*(08fX&YQCzoo$ElcC0!76BDD?xvI zLCsS1&!ZyXQA&d6eJP+OQ=<0*U6JZpHJ2CATf6tmho7?;OLiEKgs?Abr*?954|X4W z5mX#J!!wEJbl8#fNuH&V88B%djzRGF5PS64{Nf2XKLQ1Jsa1T4eHX0UCs=AS9b((Z z$48GJIdXzkwp=?2%43&R!k4gt?_N`7Me7&F#1!JI+8IM4;a6I5L={ zw}HhmE;%wq2cj{*x|KB0(EX*Bkvlg4gJ=wpxfEQ?Wa*$|@U+yH!R*M9akz>Ncd35- z$|O+is&;CLeK|TG1l`gIFYvex567Ye79Id1j#{fY#2$v{QF^=y^7`J8a)oqQ>Qt+< zlW=Hi`X*|~x~|XBIPch>42PF~e=;^7oCQt#UL-WUUmZyM%uGLAC-$F$Sx_f8cCrWS zol=o2-e%xQM(CH?G+-3INVwaNMN8hhWlZH|bMr zR@km&vNQ8B6@X&JtlG|zJ~=`+D2g-v6ban`PZdO7(;bB+l0E`Q&t|0$W|3I>Xj>MO z8G7igB7EC-~;nuGNY*#3EgZV4haUAf(pO5TY2wP}RwGkf@uR{|u$2H#eCL z+h0Si*ac9>VQ*6f#&xNxO|EKNvFy|}iN5Pn-8*vKJK*IYHEP7Q*p{{Lr$|NG(c#t3 zNbLC9#g5~WZ&3CP;?AOx?tma%`@j}CAo{@q9g#^yB#~@mx9mBnIWBf;nx0%;e_N{Y z%QgNL^2dhe$7G$XICnf9iFeZB)D=6vu9U-{ax|vu8~4gh&9?6tNWRB}_PJ+cto7Rj z18oy-xi#z7*{N@Pu>5d&#X->=lYHZ{Z@dVamn3poCYMEWIYr#tqKZfn_#U(g+zK}(j$_d6anuL4Rjxt zh+igtk@)}UZhUaz;e{3JPA!}8NVNy$+Jh^$9nZd$qci2GQb*Z{qin=cHo_?HSKuIF zQku31xADu=Iju41S2?(a(EcIdb_86u!X8ev}ng!$c zG%YfX;N1{pB8WuD&IADJl}MjV`b5%~on1H@pgG+#_@9)>0U5ZK9LO>&oQujvZc`V^ z9f>r<4qDwVLs}Tv54!~h_q)$f_~GGXFvb}!f64H#;o8HuYUDZpSCC!m%uxw)jda~Q zo5??8Q#5YT`12JL?G?^npvnMpGDgiL)4987&93(i^!SLe>W+p&h;>n`73f8XU%|zd zh4TUdVb@-&uCbu5v1DCSL0wbHy5@qqX0RL-$2V`!7OTqDs<{GvoFkJE@C_lerns52 zu0v38&LP+r3Vny;j{pO#g*lc*uon8@s8SAwoGorCVJ#%fmJcilgQK~uz0hx8I{i95 zzmqu#PET^<`5|rk=^2Paa5{WCvVezm*x0?85Q?(VkiY|XEcL#9W$fxWJ34mu_4Dtr zY0kYU^(htfpkZ3_>p$3 zJ=@S02o0k1v84k#Brd~&C+!_z&O)dGubTTV!#QzV_L2WDz>oazG5N2MEObJWwPDi1 z1n&jT_>kE%Ras9{E8m+>KMQ}z=*(X&s_O3mlx z=JPABrF;jseLY*g9`VSTr)`pNO!kd|AGXH#=+vjD5{IOk9=QfQd1cP}9apVbcSdrJ z$gUC51#bDeRm&fm52RYVQ|-N}wj+C1Gg|=%r`U=WaKJb1IT&ZvwxfB=(JVPw*};mL zU&f9J$E^%jHKD^B-fR`Chv6qVPRWi_VzFPwj(Hj~G#=#NpRG9C&wN|WV%l#z=QVzN zs`^}=@$c%)koVj92RM13V)7p`>B0n6A$6|NQqTV!6SQIQ5+;u^`2{4oE~j-Y#N?Ow zVhNHTK7`3p|Lv7I%zb-Ik)(0R+_J}L$sI$P*$LFlB$gWU=?!kP@2PQf@VB#j3_QL- zqB@BX8Gj4^5N=A{2VRmVn2PZez*KM*_$AXRHdby}&&Ha&;o zOQ>E`t38i)5*|%niZfg1JlOQnxW%J2h(;#Sh$}U~$_!CX^Y@gp*?GkdJB0|?_VNFZ zNFUA??#;tGHG0tia9O5*e}IYizVUTc;|8^a^`X;a_4*#zB&Jde=ct#Z!Ui#e@>VX<^mIe zxq^v;xx$IUIdMXqE1D>pE1oExE14*nE1f8vE1M|uQd~K+<#QDi6?2snmFzipwrZ|= zqIz!I#5VTqo2{9sK{(%R?L;m9`e*Cr>L=>Go*d6{PbBYGJ(2uq!3Vh>&xiQ0T@wxL zoq%@%$Gb-Mt^n@}9q*dhI}z`S9PgUhyJEa6iF$461tY%mo>1unN$|@!k2^nMhPh2hJV25}kSx z!4crt%xrXQCOo@v84u@Y64A2@voljS(|PBPN#VI@y5QV}@Fkw$acL%^Cx}=U<}NL0 zv7DNT#q~fX=Q0l`Y2dv4evxAYA-KcbAB!?#ZS)1qf#t76`xs{Kl4&lnhoDf3>}KR6rP`o zMuH2PPjGrc3bNoqbNFBwnY|gGnTidJPvw|$m4|!E*yl@d|AF@{PaIv@xIVzmhsJ{^ zX?nvwJxksX;Qt|i=k+XklI9e6(1=?|qf&S%x{2ht#ObpH(I4V$aB2@%sVwZW_~skF zA0SsB@|>}J3dTG0-*|cE=_-%ko}>7-OsFM~_3f>krJSUgd7rt9qErJ>+Zs5f6rW@pxjRAZWTSg{NM;jOk}SG8&XFUkY~*_6-g8?HueI z9N4ofw9guLLBjMn8XUTIBN)Rh7zwrw4v!r=vdb-4zZ8zlEXGEII}qF&U}|A@0nxNg zPfuH4OfSsG`=`TmGqX2GgIz}!=8;M`*3}p6I)5y9dSQNnZs+hfb}c*=?Q;1zh6&o9 z>tm@^&#D^SSkIr1a5+4m-T9}RO z)4cR_ccQ3?y7N=fdF0~s;%t1T{|JbL6b>F+jA2Gilpey&`cgD_K|;Nwat0GcPn|g# zJo-{}KF%I{wD7neUy$Ngx(7qSqv2Sb{+Z2 
z+=xa_u(E2eIq{PUQG4mY(Hqyo^O2eP%V|Fgn-=lap?F-HxwHr>QbmCpo;@^)!p@_` zB$V;kf^>ReF&5PdEKyUW%kdT3I!;w^IxJnDnUAFlC_89mL@v>cn+}8{5rfLcsD3AB z83iwnQ%xe6{v41NX7pQeidDV-RKkgq<^hW5i$!Or(|KCX(s>|t#3=aVGx6DI+Iu}6 zn4FwK|BFpd#wZVL)V%WLGe<@*J{<#*ycmhTG&Q@h7`ZqWjlCFOxOVYe{NmJXG(6uw zJ%cXPe+|6F#c#(a!`H4&Ub`6sZ?WUz;>^XDaI-KOiN-Z5I~fmO8n|{-YC{rzqpC0} z3!|TwHLP4v%39U3*5$l)p?FPbNeL~<*5swt*40ajutODg$ifcxhQ3~DQG^CnXpn`5 zU*>trw#nN@mC}7`>Aq$Ex}fJsZaAk1PpQIFvhWo9kd>@T5t>z@Sr(e{VMD9jdO)c^ zsMa4`_NhY6y{2}#W4F?@M{U|e9`*NXo0BCo?Im?Ic zi$y;;^Xi$EaYgJ^#a>zLT^GyO#DQxk!bxPgzpQ zTgrW;RPtv$wz3e(bCj7pB7!4F{;XjGj<9)I!xlKg=4TCC=m;yIR%q^GSC(24m;Aqp zmCRBe+~TEtxFt&h+|qY#YzxSEw)QA<)Kp+8kkr$){)v=3a#@fyY=t9iVb-vfj<6z1 zZ>&nQvEfxq1t@FvQX$-JOCl?`gFU1kAF4@oJ$)?_jz@zrT7U#+Oe>IJ>{@hcW_o5S zhyda7bWxa*&q+4v$0Xvj6D2m1nl2y`5M>lKA)2XcI$xs*v9y1lh-=yx1!)B>B*L2Z zT?biBl-THRB3~!K3BjPi{T7@Tx~=R57A45#z7oW^LeU}6=6L1{0AuU%x#HOZO_{6l8Jm>9}R zzOsI^3sJ`2!$kIq=YC1;hQ}MIz;wENY$MN8+xA}ZJH^SDKk_OyqiW6QauFyICipd> zEhV(c?MLsiS)h-qgKxd?<_pPFcUzRIoodz2rDb|`K|Fc$CLZ+<|viJYUS{9V7;>Ot|CLH*JTH5 zWgxXbbb@2Pq-ze%@!4~f4R%_x)i?59pUH-`Zjg6c$gm4(@%yAJTokbZLYuUYDK2}y zogT7_s&yZLCr6t@ZuoB9|zsbTM+Q251$pjH)`Njr3;TwO}_T%Z>>tT~` zI7H?6CJWTKlg~0IOShmeFqO`M2`pA=9_T);ypU2vA^`C0vOkT(xVsQb#xr zu3F+NbA+og;zJAZJm8+EgL@tq?s@oW9{{~Ww}d4jS!={${u?Q`riJztvb21KBV>M- zmUoPm*A8REx{q>O3$Q-O`^{CN$Lva)#dRY*hr`Xzc#=&<8s@*& z*@SkgKwK-w6G;nTscq2&zhm(vj@FENw(SVhKP#BX0_kw)jttznbBO+B3(6tSJ$A(N z7SF*8EXDAOwiu<=ZM zLBfX@5=EA>g_@;S)QiTUw&RYWoD9hXGZw$X&`GJ6f`EhaNd4pxHzE;{N(;2ijH7MR z0_uZy@?s37)VBX5!ke`)v)KDfmtx&^I(%bhZgEb7R3#db=|X%uNfBL&&!>wctX+&} ziDqMo*rRkwbUu6uT($vrrbXQY>*Hu7U8+AtjA}?18SmK=lEJBLPZNRjr?7wdBD-0b zpGUQUpVxDdE(9z|U4ycaF4wSOaHC-$+2j=q6|iNdw3Bj1D>rF`vR6pe09cjbry>;x z`;&2OWX94a*p;>!uBA)C&uW{5#s@i5m$an-aWFLS*!7CGCTN3SqRHH+InT@?(Pt1Q z)(zGj%Y`jKqIm>~<`E8bY+-hja$Swbu4p4Dc!A>?tU~Zb6e{hbGPS~ z3)h8;HK8#jG`>5XeD>{yI}3_1pb7)BFmSJ?X@%tKJQf(c3Np7Z}B21~m zlq^i~7*-;8<`kh%75ZeMF9V_0D?*1VbjU&n@>|>fcF~<8Q=FYCp)(m%gkDwXm4)8S zXO(vv6`@NNx@4gXAJ(+JaqIT2W$|8pXY!^}zeBCxp<}S2)o!I}SZx|+5G-3})~pE> zuiSf7b^JxbkPVARVcl*5;m`rT^%Zds`Lw7FXjJ+3q#QkxGk zxc6Q|hupbGY1pea>?M!ddqHIAfD$~Y1{ti}c&`yTA5|Lnsg3(6MBV+0ZE8i=YNZ+) z{b)?xe@;ICta?5wmroNc+{>Kh!|P)44^F>&dZnG?!0^$r;Ymg8SH*r=>|Ylf*2K<~ z*qM|Ru}2kqWcIsH7&-&In-w9XqKbqN*8hPbpxglV*M#nr(7jsuQ#8qLRY19RYxSuL zZEHerO6Xl3Q~UNS!egrNm@GWTK3x-5VN`+i;u-?uzi zEO>wDz2#l2aH8-B6Jc|1X3W_0t;TBiC+oov!WHP#MBfE1rjA} z>n^h;mndOdcP5Ww+S4;X6P{>#uF&YVMY5^-{A z0fgt+Z1^%j&uRLjZOc^M%%Tocs6_mH3b_U!&w$LYCM&I zetdy$<}4q%SJ9drRw}wMDwoIB#ZqnHz1Q`hHffh7Kxq2Nm&gReW3)A4iBa zu_+}sy(`^G@L>nv*G{jAEh(`ji9y??id{1MZ3K*zoTG(`$VSUMY_xz8hKF_~T}Oho z$J4k1AnwI?gkZBPN%1)@>vxfy1VUh|B^M*o44b%(NY{~q5y{7gIWcQGalRb)c&_r1 zl4V4i@{x(r5(5V&Tt;N3{H~ydb3{V!htNUCWi0^~1i2WuzC__6@VxWU>x}R9PClU4 z83$NCiNEQ>sYQu4r5T>iI=O!ek`Qq=aHN+IM7mB6t@$Kkp{1MTP$Nj+fs-zsjLu>A zoERPr-_{3*7CmDS2@aLMi@>&VF$b@#e2vbI5tJ1B$8cWpfcV^gY^|a%RnezZ45$?Y z%ffnflU%VMzhve6jcYACQ!P7{mJzjOb+$}mlb=~c`wd#RX^?*`6s8$bxX)LT>X-O5<%Z2s& zEY_M5Ta)_~al0yRm&NUD#-o`{5!+O;O*VcTevjCIubd4M9z`%7uleR`0m)qhS&4j z9BPJ7{_a?;b8tocm}>J9jYlyTvRw)+{-$RxFG_529yDe~j1gT_2Rp88*&5*MRp~T} z01;OoEi&1zr1a~$4^5!yd~G=!84s08(|C~%%)|`t}J5A~7!puC5fHbokybjw@)=HK3&bUqIH0l$b zSq8L?Nc{zd9crWlyfR>}{_08`ceCME2->261{cg_h6r}1$H~hI>NFL5-p4_66 zUyy@G!zs|#F*+5w$`?3Tu@ z-0&Urw{Cc{Ifrw~{oq2m(_KKDlH95B``b~ihErVO@7eHd=4hp-{7vCMzl@pj^F8pg zZ$_H79d%i5w(Xb~31G|7JCV<}90iQbf^-SP(KNw^XJ(?;6WtoHrb$BtUxL@8?I?bU z&il8$qZsFlH$tARGcwyJ`i2g7u(nn3`YqwnjJdMp7+&o_Q5dy%LblX#EQQ@&i;U^RceM4 z@`ZBKeztaI{G0SW^2|;9Xsyg1m^b;*LYnzJAQDNRv1PXOLyC(&)4naV;l>h?ObsaR z!(G}CJf(pZOIJ)zL7M}Zsy5sv$v5qvUAP{V7^Ix8Hv-I70l20E*A`-c 
zA|M5j7I4cXF=+XHJ)H}5JMCl97iwR#v>t-3*4QQoj;j9eC`zmvBjKx}s=qAtlsCFb zh_)opt$J5?C_=v~^vgm&!}DwsqM(e8%?z1auY5*kzvaOFR)({IaODR26|rgg#OqTl z!ysL26}_p7-qjOI#V)mC7YNtT$nuyfHm+QGZ~mQmrD;HI8US0Ik<+}89A3>;#6DH* zli3gHZL43a9!XV?DAjw^>OBO?8gN{FGQ3*G(NO@T_-V{DW2I10bkQV1Q}*XWa5vL& zVbc|#V(3%ja?xJN$+98n5D3NQU5-170BsPzRp1g^^QOtfg;C9dv_{EU|Ob|CWEr@4mo@*tY$% zZ^CaezkYXCKH?M*$g(tW*@QQj%qBDdVwIPrWVY>klq~OWq$Ql&&V;fSCmN5inG&FC zWCl|5kR}V7c$CCE-$f;S(Xx5zwuh^2T|yfqvwyp5*XFt^w7tm_d5}b3lP2Ii_=FtB zcM?TeB-UZ$z5n5#2NRE~ATmfhrupLw!D;Nc(j54*621CCi91dGlQuZRjpNJA+W>PyGrrB{Cz$-Q)Nt;o7);1>T z*|zDasdUUj4RH?iNW+c`57D!RP-_&7ZEk9DYMYy(d<_603dM+(^i#?}X2-BVQ-TPi zy|Eeu+#HM1kf(OVr9sC(f=8mwL7tq{^fLsEeIJv{E1pkFs=j~YwHplTGE^XZwBP5W z{XRH^9T#oTV#H8^NLKKzF@}C=iV-H?Ec<|VDyHuPhE{IK+d}v$r9EnCPuBfFq)+>e zz}XHcLXRr+$U;ws;#f2usvA~LDpj3oRVN9vmahrH6x2LgCKTbDs_;!&_$KqDeV!lX zykD#c+f`w^ENo}KBq^Ib%=hpzDby)Ky9yytp`B&?^(CdWT`g_5)Wl;8U$ z70ZR2AxjS7OD`UzT22h=Zvx5l(jk zuB&qrwbt}IEh?YVKSNxhJWUkFM%V->=ua}TRv4$WCN<(lYXfQ*gSPz`&xzh_w9RE9 z2Q`*(&Q+dXX(DbbBFUCerA9I(IfY zFP%hUAwgr(OLSn$;MJiKt=Sn77ntNFg!yPwoF=+-p~kl}-D~Mj$wy=poTgJ9i%~PP zY0jAG(&f75m!1e)_q5t4K4+K_n(zJ}#P*8keqr5OVK7w~R0>|t z<=>UXO3JC{%0D=B|{zfuxZOM=Nmsge%4q+`7l+Q7F<)=Ill zrCoCO;k&2c;HQ)xQA>}=#&4s_Bkn;_Hp)Gvo%k%X5dsA%hC&8h_e0uHH4?3mBqAk} zOpfc;#~pe@j*9=Wm*IS2Y<#@pS@#XezM+krT7Tb$Cz~_U;;-EBaOapeNYC6E^NRj4 z=v-~p)k;V?n!!934p*o1)L8N*^t=sfn^`mx_=*d4G~u8HvQSq;_v z5LN%Ty~6>--lznvDs=bno}n7E-jLlmjahTZ63#|15U#|iMoTyw6+k!+E1Gw|anHZU zJr6TYJ3oD!=#n|%%$y`K#E#3k@_MZwbhz9R5FU1&Bz-5QU_9|>S^0k5oC{33z?h4> zEf+4=Z(;0d0yprazd0K%!(gs?-XE}KL=#HNRuBo{^a8|SMuYu{z*zL9nJJi4=}!zF z2}^`_1(%=@Sz5s8jh2s1koKS^*+opqFb&^A}6jB#C z-QsVgDBwqn>x z(J?4&H{H?ZZj6{Ls&=!~d!1um<9jp5i8}6sgSq^xNxYkkfB+rRt!reYzqV`yWRjdkhEb3l;OR`6)?^o;lX;Y`>UIPg#HEdTKwrk?o^^7O4|At!s4HkeI;9&MR zcHNDuyN}CdCqTK$1_vC7D{R#Uhuhu~43OZbVN{$iMFS8QOEd82{}Rqt#MB_d>9Wj<09>w z!kA6x#o2gG`>sVJv2=bM05S$DjU=e%Ox=Xh95>dnSZ51$t8h*6){A+vS!e{5~4d{(iDk z`zt>m(Qg(K+KnRIQiWTx@yqaYz{Jl|Wt}4@BiPbLkAF#yEBp3u=nXkXscg^T#=^c0 z%f8`_oGO3gh9{ddkn8_0*t3VbT2?8oDg5V`(H}qmA#z}rLV-GbZq$acP58;zx;mW5 zbMeb}@e^2#HtpMq0+*15ETnB?Yof>{WU*_UC9b}u%-5#>IZ^H!wt^}Bdw*4!sEp)5 z#lH$x6+IKx%mBbPCW%wS3;@(J0|0dqSi_vCj|AX0L}2}LqA^kkwDX^CETt^72NJfZDboHeL|5MJoPYnpI+dFb?LHa z$imc^VPFdODJ{Ald1zW{)X#q(4f<7>Tx;`!6@euCB%koulhJ9kcSJzh1r~P;aDsJb z)*?k|=`#_YV2BGf^ROBU+Z8(K8UlCD1b+rHk?Nq?KLw5M%B(xl>0 zBSDC*&xN_Rm}IKcq6NV<6|62~7AVRhYFRbQ&APpKMEs|INQ zPS^BXxw;B_Xu6)BE`4H=?&bksG>OVUS_VIjbwN4##mDp?!q+N$66Yh+u2GfIKFQog zya_PF&@>@8m)u+40Ku7jMDsLpW9`Gzm_@3@Ywx$aJkVP~jK-A_!nRAGU_Rj^oW#(a z@+%~C+~(ND}GL0f*UsZe}otE}x%ZoQQor)7YC zfzfh>03SWI=a9==x!;9BUVw+}?z;E7M=M7tKhbdt1JKPJmPN8Uou@q}%GucNH}4uG zDk7Rq!hS8xz||Nm9DAklMChm_!Fn$Zap**6Ph>P0DwIfmoXq`9!w52L`OwNGofo|U zh2~h=w*ZOiw2wvXqv!wyk^wJ$mVn$Fj8;LPSkoTXvOzW@r=r(Ld@b$22BmB+VqK^O zz$5`vT;A!lH)>Oud4RMNgU(^U`CLU(0Wrc7AsSF!_wJrI=5Nod)xB!<(A{q+)sM-K zUr?$q$j{Cy)w9cm8#zK?ACSVel9p6Si&E02mb5`$py=Ses@j!a=*L#|s#TCc^Oo&e z{r0`?zMlr}w){l=srdSsTG;_Jfn~ecJLY^wp?*P5#xLvL7himl^_})2&(jfFYu}%0 z->i@?o&%!WcK^wUP%qDieRJAaPEm^a`~Z;!f^0Y#KWq1SQZbj2iwHbf=wd6Jw9BIh?lg{2F+j9TsZGPNaxDOhajQgOaHO!fNb*i0vhpZO+BYAgp zkzM%bLwQ5ElRHD(pn*i+@o1duT{^8F0kSZu9i+?k0l=(pYOgLpqO(F*j5kxgbfxY| z3IJR^JzcH)zBD_>tejoe6dIUN=>vi)ps)Zkc0;F~Sk4%oefnAq_8Llb_?)Xflrpu4 zNr;{5h6|8MLt|e{7lsScWt#OhP-nw%0kT~Zqis-A<_yptC_ginkS^yLpEUe_0()Hy za$utHx>U^<$atc|IgVUL>u=ED`3Ax>MTMf4`@P%O`v%tshBoqj?d2F5a70gg$9h`_ zg#XEhoB~gG-#UIh1M8uF=p=N(pPZ`FHmC=XlUrU2?S%3`!6tN!M60cI%t~vZ&|{)V zcBK?1q+z0aD1&~Xzp;OI3C66llMW>r%=Vn0HDaZ|w*2KHMps_f3Hu>NqTi0o79)&# zW{J_Z2m}Ajp-X$dK=U|d1lYju-8bQ!;_%4p7R1Vt3a9gyR0v{mTnNEK+j$0*#bkWO 
zS|dn>D=Td&6&~8jPh_oIQyp11PYLF6SCMk$l7q*aq{73vq~Z!6TT_9&YPR0tE>@D4 z*rtn>5GM{L5G)yC*@c*Q8aQla5$?e)$7l(k%p7(wI!b}vZ0kIN*&LJc@wOd9^e+Pm zQ*w0KEYLWR>Wpn23QZY29F|5$VO7~JbLJF>hUuR(#jMyjCBTi}AS1yYWC6!gK@A)Yrob%D?4h&Q#VFTf{f;BQR=FA%i|&*Oqf$uj z#u7s&%|+U}U~;kN!CKBngGZPF?nR=NOdN=9`x+L}&9XtEa*1Yh6VlFBmZiqZl9r{D zm%_Tj1oviaDS4AzS6Y`TMZ?E5QP(=-^BBvHvBqT@5@i{`$?Mv2A$fr6(R4g-F8{l3~Ju02#u=HC<~2P zqgK|eRdlB+x|NC^wW5bab_^kzjLgjzgJE4=-$8a7V1=dxVC!ooYeGj#=m1~P1##VS z64!-8Y(mjSkEgYBt>y7l%i~JRVYTHjTdnIKu_ZUG#_W=JKdA_(RN<5?oI>m^gGz9R z8f5lq8f{^B$|KJx!n3OItSmg6vAR-u$6|Md(ma@IIjFQ8Qd^kWn5ImDE7jVJof_BW zyHPb}`Ju{XaTA{BnTPT`ScjuMotKN=NLJBUJAJVz*0~A0x^g=(zv+6$hy-)AY4Z9|uBtMG3!Vn*&OeZ`W z@q%OF91v3D_gQMd28FX4;NNVa2DrGZOKGyz0FU408_qwOYrx8fp!}pFpJC?A~-ZWoRxdA zC^za7sg^`cWIp@3DHO=eeq^J=5B8(_HE5WL<2K+i+gT^4QT8xV3QKnQ0az|96lVmo zFbd8iQY=wZAy^Py#@3>8LoYM@1U>PbjE<64&{P;3hAx=`36EbvEtABqBe^U3c`eSj z4^L~9nF@g7%~xcb42k52$XQcSZH(?F74sm77F{H8j&WNSwn{r_=BqVnbYypQ$fXni z-E7iqwc*mFn+N6m`X){K<+k&2nEMV(ks#l=!+v`t`emar;4WRdWKr#62P=ijGP+$7 z6KEmOL6cS?0+_DVKv^?5JgLof*oS9xUPPLufOWdJ%f^pMltnv}CtbnWV;pW|#8+v= zV16n6ACw=W+nV4E6Nq1og>p6F(_CWTi4%^|0sx~x)BPedbcxpNT&}@2e~NIi_u&BF zEw23jmDjE;=dKHdYXYegI`Z9^nL;-3u5}8dwuXQo;yH zvt*c03q!eE5%#FU9$DDKcv7Yjo#DTcczeIxe_Uxhp|+hM7MRZYfx9LQq=W&aHkc)G zoC?&vCJd&8!Ml~}jwcl1NmY1K7M`>u4t4AyRTz?mA)dG*)Z?@TS*SVM%ifkH-|@xGEf%h2xeuLtL}%eo6VV3*1M2mD>ID2>=aW2kwiTpf)Q*0+xeP zU>8^pe(w(AnVGi?gKB559V~`9UcPZur)-|%v{d}9JRGxcU|eUyvXzq~}P z&i>INTesnE9>X4g#O-bXxOs!Id~srW?iEUdyR_xo3vTDdHuFMd-hn@ z1Qb=G6SRg=GbZK{jSK|O&f=&ugYX5zj1mnPB!;?|wqhXgN24FVLwB9z5S*pS#{Z9W zNAfVz?~z08F8ynA7}@E^ogv~rgxoW(-cT5uW@lv^B5BM}JePUbS^f&5E`8Zq{tRGG zOAZ3@A0s-mjL*z;2AupeN?E&F)^6tH?Ep78+HH+*T)BNk7wICUP;6+ofrEBfihybI z)|3ES!>|}+2y8JiF8b(4y=v?3Kd4vtjw`|$RX8IHXFi3%*gj?3c6HnKWs#XM?@9?> zWbtS3YfxD|6m%%W(0e}JEs`DuO2+k5V?gDg3ZO}O}vF1-&rw@Vdv$-*ux^F65u z-%^Ed$-=h`e@*E5q#`__3Qx$w6NW#_NABu*o|^H3B-(!T9kpx!AH1j@!0QXDa6uL> zSnRK7;O-1!p24=)A4dPs$-X-M!wk!*A7%l5?=F>=ZT>e)r&(zt;^zwp#Lw^D_vI}i zT-kkV?3&$X`;3+SI3nbR(5{! 
z71t$2<{MWAKTcbN`jtV09W`a5ymktPTHw3m$Roecl1B$xZ~5kHqU~lKX5q;h{IpiAmZrQ+jK^XC=?f z0M(grnLBfi9O)7LFG%dv=|A7P2ZXj|HuTfQ`aZcX?ya#Jq-TOtadgmg zQV*hS+!w_7N#yK^Mx0E*BukAz)C<#rOJT5SOe!FcJwxgaPw#;6wkJ+UfM zCm4rxiEhK2r_a#JFy0$ByaTiu4+$9@_)VS804TQ+x6+%PTjAZxi1{MEj{O&nS*fgl z*vId=u$HjOBayVb_slu#X>>pI?UywiwPaxo&J z^HJ$cmerQkb9cRWfd%TE#;3K-$$(PZ19QZtpmcgD*7u!H=X~bz1fI$H1#{tpdp$N< zn*ZqV`{0UncIVS(cH&C&UbUH>xYEG#kE2L?QUc85diuV}BD=^UyO=|fU8Km^aix^E zpSAuJY|>s;*eeTrVSAqPOpFf9-LFn?mJl7LS2BX9IPX`$5?iip5B@{TxI8~`TxuMdgQXBT#9 zO?-=We#Okr!(krf_wK7hv$3emjuukvsZp=SA48pUDg7k1XG~=m}U# zz`;bzS8i+KOiG-_n!kbMAKxc8`&(<$Ug}NjoO4RZF~5JLI=AsQ_S7x$y4Sf=b={no zN2&8n3ASG6nQ#8K>YVd?s2hI&NOf-GkzDFL6R&%nI}~;;d3lsN&!n{VI?sIb*IDN# z2@sTqk0o;tG?(&nE}lw_ag^cV;mf&6i!bMpGrpH|ttb+VG&B>*y_E}`F!w6wK9jB- zrqed;Ye67N*?3D{B1d~?|AzPONBD-fj>|U${W~t_HdA~@CQy7RbDues(;7Ese=cv0 z`serVI*cwhYR%t};ECgk*wl8}%$`YS>wTAhbCn+-nx%C;SNS?K(_^gVMlQ0q9ZxAz zWbI|3qX#nuj5v?14H|Dzm;63UUD^(^-84r$E|*elwPzeL@&;<_JRG;xc#E9#`(&Hp zYyKOtsZ6sL&3xlJv$!06YffkDy@024wcW^5)?UzIg!oFmfTv`y^G)e`r;#S=k>5WO zjjW-FXQd zHIkDI88Msxet?G$xd%oAAr94vavgEdrMqeGIiqTksGN(!E>-_EoT5sgK|fZ;QatTD zi{}S}i0qgn#I^8ZESmP8r9X)Z$L9;z(!TRhFi8C5-1%8(cR(isC$M4(J$ePl;U;KR zI~)8?&=8IzO$KCX>P0%HbsiRV+H~~@J3>L5)4^Lo9_bZ`EzQ~@wZvmJ&dG(L*uJ?g zx2J8a-+F*mpKc(pXmD_Ja4@(%$o@)nYI36VC_4o=BOrAl!G)t3lwp@EOz0;u^k2II zLnG;e^HWkZIv>MBqW*mNrD#xJ{Ri3hL@*o+LNV`hG}L3-dhewyY5VWgHgr(0G=SW& zy?1Fl?%3(N7Zw&~FU(wH_OjU#!emUFz0ooq(U6Uz$xDmz_`*D-pl;&C#Q&<P6LaTKh<< zeMD*BqvDiX0|)sOIkLhyq!01#;+>b}?MId7V`{TmG%JIRbXigc1ftmqU(MmCTc7D{ zs;OVOaOe5ev7e5AH1;n}%MDMg(}`RCYQxaoF?HuLb?1|xTu@KHpq`$R>m#4#dTXb> z(A6Z=GM&=6nmHa zuou^Yw)Dt7`<38hYVa{aC^@>R9eK=;!$#pa=qdMQ6n+eU790aeH#??zMLQ-v{E z7-MMwv}Ggclyz$r-r5n^=2}4sbpPa{saiB|hQhY5_3TffI(iPMIFnj9s0s&V;h>g~ z<*?@?YTpS(IH?LJW#Oc$wbZ7JI4AR_SwRsX2pW`y;KO8P%D{lwpuIM%fRE>vFvYBtbei?XN_A+3!a176AY5MC#+~UM(>z8!}g|c!a4(-M7uX3a#i|m)@ zD(T3uYm?rUEr+I*?4`1dY#V~h&R24+QOF;fBpHmH+2^ZdUFJ>ZE>Z~G47_W`3Ya5L zrV)k%0l^Y~i2vGkD?h&5xCh!}JJ164%aJ-uI47RPkv2;>8*+vCINoLn=R}z}QjB)x zdB8nS2lqTK-1G3$K4ykjSQ4^O2-1F~o&%@}(l^>;5 z3;r5f)mBHKVY%)|r*&Q;WGBf{N25OIubs=_7|@W80Wmucnz;>#aY$LraFOAF*oDN8 zEEWik_o){0XDHMV$32NU~&@4)8BsGbLB%4B5(xpK8v!gX8*gUAz1 zX>XFVM2U7F&!PG7>`hqQk0qM)mb!jrCLXm4BU2qn7ttf6=iw|8;fBKMt3m#8@ciN& zYVBqszfEgr+-SO#(jOvlqT-0O5Q|+8&%PKu`VtPHV`tr!pCZFE&&({$vIEAMU+MYq zbToc5cxW~%u~3Ud4gd0;--2~;i5-dop(sSY_tLJtmqrpr$Im_;d^!fK4`Ny!3GsAv zZb7<<`{3!riPE#u!W8f^7!4txxmc(}A|4G$RWuHu46@-*MWMYMz61oX0e%^9O&gkr z!_wJ>*_o-E8u(VueGkoE58sTPnV!~Utf3NOH+dovv!r2b)i#q~4unn9=Go>REW$8D zO>whI)5kRIkQwTl+wA7Zj1-=w!m=~Z0nd^o{BMv)Em=oHcgq~@&auJAFtF44QWTiw zR8&I>E40&(G!tks-8_0VW-hHcC!?%6b^o$iw5ZjqLbrm3(@)T#dbpCy*A^!iso8Xo z@;TOU7QZUsn|kw25?rYTWhzeWP!z2NVcc+-s=AJ4okkIzZ@7yJT5i#mB{=+Ay29cI z9C%?dE=d$Yj6U%*9jkP~HFUN~?TbQouZ4^iA`p#f{nB(?|1&%{LoFGN(5$9@La61p zueOwU^slp*yIK)oxqjL>6Jb8@}anBo5z?wed8;W1yJYV^>W5Wvi!|0X|IGOVcpz8k7eQDV>MaPG;Ywjlmslx-eXvV>5>L zj+C%N9(qa<&a1+CSvarxt3vOZFq{&G3E4ZV?tEGio>7HoWZ{{00$nt@EVVNOaRFoN z+%9iFrgR)vJB}0L)5>B3B&H!oT!78+COukt$2moKN)?`xg{LeLXX+dgckhsQ98tQ) z)UGj#xShq&z9wu>3ES`bxQw0QJ(Pm_?XX$;-jw6Rif}{~j>y6hE`2Z8!c1+SD)h-h zA5yCyRO)u9buf6N8`QBNMNg=GXB6SADx8&tvnC~7lgS&3qg{rD$-2kXy2qAbm1mn) zV{+%7waz1{&Lc|aQML2vTIcCh=jl&o|mz9H$E8-zlJS2-aB%onU>`aNB zz}dupRqU79k5}MI#@r9eUfnD=4=PnV)T$kpV>XA^S`MUI4k#^;t1XYOwVX(`ocN?x zzVN)#@`Bp(f+9?+!lW!r-oqwBk5b*QR`)L#LfA36pfr!D%_CIMhI_5ut4&Jl9<`Ne zb~NAX=#~2pC>;mYj)Sbvv2ymTj;Vb|KE9wHos_G-MYwZ_Im<`a-HW$vO>9YtEy)f= z>{i8Ynf)@w%N+5^{Ak2-mG1jO(GN;qEm_IusNi}Dw@!rFhF?Ua=>O;c4l^YkYzCg5 zzXb0NU+r?&PqbTRBu-d5iCtfc({VZYYm@Yl`%20cv%*BFU0JjM9D0uPCy*nPM5OV( 
zeQl21KRibc9MJMjUMB9$ogE!y>2Ori9dC52*;oV8%6#X(OYhJdjO4)zG5$jVkjks6tPzodu6c~ zJ}S*i?J4XWbsSa1W2$&e7LOVJJt?tA?){b`hE*{vv)_G+q%S4*DdK=C4#?tw5m<}l zfFd4L#e=eV(D2tHc|sANRK+J{@k!(;P{d3{Flq7Q8ERTm4Fki0-R#5mHF0}N+7TAN1RlSz)T9@$f5+bk@(5^Nc4u38ErIfrB@KhGIYA(k?3?$eb9%F zInl#3Xnq^fV9>O5)1ZM3J?wPBVv4ON-YWlRM z^M4IzE9U~CNANLX1!CF6&fmEcLD#;`M*+w${~qZeN80619yiNLgv63^67A=isuK*L zxuH<{6g&>wl?SP}h!R#@FGaOQlysQ9*h!N(@)ql-19;X1P>OM9Haq=YJSTcwI!%^2 z{|t2;f)3DeGz|>4zdB|XeqcQb8YJvI2CNWtUUJg&AG0fO;DjIi{;oB3ovEn} zYg4ucmVS+zklI!H@5tFtPCwc~vbX5Vc$4VgyhUxqnG9Thoko3#MozXyMMkypOS~7E zoTq+29=-#A3qp79>|*T7VJ4sD0=8LX+OnArwkh^A)jdlZga8=OCdsThUYcdtNU6^e zz)(J%Uzm?t-`eoQOlctT2gdASWy*%M8rdDPn}*At-Zhdh+k(-&9eZ}~8Oc@%sfhZX zz1y9kZrAM6?aYF8&r?UEUFcUl8#yJVA~1;3%`C5;K%`cQhs?TWNjsy!CI`TD3%qm^ zko0TjN!EQzcj46^x{h#ugG1julYIS7CwFD6|$K{l#K$*CdgF9)luhr;GBK=&V zM48!$Qt%ke9;H2reXP;gQ!LosGcb2;XRv1={L&0u&=hiCB=>ss(p+fYzC*LKJZ$Jf zs6e9COu7Vkl@_QQ{w_YnDUI7& z)onv}cglNDeR2tcTIaoIN#C*N9O*m4{l2&yczwruAx_rK3+&K&pth9QmON?YftuDt zyLYgi9p! ze}jz|3;O{d=jH6fma($+Z+NdwZv7k1GCjgK8Ag8V>F^P`<#fDKJ?eokA>2*0WgB18 z8PszqAU%m3Y75=7ba#%NXUVxp&I{!HIyrA4xRZ$|yWp+{7PSQeA1XI9T^Q^Z$*wZU zd7C1HR3Rh_A@Vg;XsuGE?qOYhnM@y(-9yAXR3N*;4#wRXf^ye0B7L+@@UkJIidnia z)~o2kaGYJ38kjoM=Xb$H(|T3<%-oHo=d+2nSvoH7j);k0)&`4n7{+5-A6k5Q$N2#9 zeaPbkQ8BF_++iu_2J`&I|5kGjLT0KfUY+jZXngJC4vIMm839|C`Xn;qxU6kwTcy$N zjpMSbIMX*-%Hdf3Tk^xntT_p%5TFjwH9mbULT0NXGcnQ-fUTU(pI&0eeGq*cIF{IJW^G-~OdZ>~b9i{fy17f6#N#26Aq~=>BJ-Lps6V4S5`2{Y z7jpiboKN7StLR)JKCf^fqMhFPaRMx~VTg?D#8>*4^dUuSA-iegOjy1A3C&I;H<^p` z8wT_e{b# z9ZW`v>ms{GG=>ZvWGM`P|d$xoP>zta{};a>WvnAlS)(vwUP-EcwCstK%!X z6|qef+hq15U{wp?tZbU5SY`he_ylWZzwVX(QMSn*wjOe<(qUOc=59HpZ_6S5TMn7G z<&gRIkT?_0m$g&@d+4ko3$`4x&>k`(;g))X8q=9a?zUuONj z+}`5Q6UcVNDQ_wN1N6HOdEW!um9737B8|5E7i10Fl=V$> z);B?W{MOX0Pqzw}3clBjvT|lJOP_98Du}fHrte+n9(COozHW1DHxzy^i0`;YMwV3D zmkJ{tzv+u~zU$cRnx}Lh_I;NlU9;sF_xYhaYt4r2y_w~$$C)?ejN8b_QnKExsr0#~ z((jrIm#xo|%0Sjsw!5a{QqPe_z|w<>TAjg%mTes8>_WoQ;Pul*7MVItcl?KHV>b) zpI%bh(gAYD;3P^eERah1nM+q;TpUuFr0{N+-b|D^gf}5kYL(rT?opKgj-3CKocrYb zU*!B-IOzfns+$M+_ocUA`U~>I$*Tn1O_FCi7lz=*(*bVuEnRHVM20}1MKhTM1Ysx7 z&n)(RnL98vE$tz8u+U^mFOfU2V>*!wNl34_rSu+sO-L5Q&uP2DxR}&pTGW?U)ITBo>*Siv@82P2`QO8# z#aD@1*!7vGC2-Y?MOSOrk86KeyL#}WLrTj5wdKHiQ`>tp@64=&jY_5kcY-T z4lCU!)b11OZQVcK`@_Af&m#|Q2h_F$>m5Bme(s0Qt-k!RSLrycb{xhzw@vt-oQmQ) zoL&oO`H77tPf?jF0*Kv{68C&mr-);!I3|l@C>Fhd7zGJy$o(&TwD?CifB)tu&GN)I zm4g@6gBRh4pCV4G;-oB2vISU|CUqeXUjF#$KY#8|o|Dgn<*A5rG^!qr!V^D5yrPO% zWbq0M-?}Dh3$}}T^o{a2o>LA^s0SzH-i!Dt;`6HbyevM?f^R)H$Ph|US^L)BH}|eQ zlRT$Xbg30x%VX0rweBqn&^H&scMipmd zaR%imsre@GTtuF}svLh& zJ^rH7JFE82DwT6;<=k@Meqr%4$<(|yz9!NtTW;IAIt)kqDdH|w+$FOg2_Wc!l6$$Z z;r+r&xiH9b99$E-Q;^RKoxFP!4*x0Q6RP-x%zkV^&japL#QmzcUl#YHG$g%)oo>>* zmpjhi-SvBW|LNY3zWGUuGJHxMJ_RrSGZ}A&oJI4RKt`YC?r-Sdyzt36W%!Ibd^GFr=hV^X;K_fAFr^ApvhmZFuzZD& z>4xsv61EA`iFFB^1uAziJs|LiM7y>sU}!eSvy^K_T&*lwtjZSBgbZ3kO8zAnPyS8c zE#G{J+2EnwxIC149}4R`6(8BASb~rxPH4RAMmg^l zR=qy+qbjAa3v_|Rczb^~dUxi}DwN)%YVT3zvEJM*Hw@sne0urxrwuSg`u6yp@#WJL zA@&?tgtxc~y)5+O$GRcoji$_fkyudH7ti9x+83oJv>GZ;yR!ANr5v<2V#wh1km7?+sie$tyOs?o5e0?@nS`omIqd zs^T|g<41VaMG}*Os||NvZHSD+gLm;@tzvTp<30o8QDWTcSsN~C((C28s`>)Tm)b(x-YO!^E#3Mo3}U{hh7Xj=~ya2eWZ&uFGJl%%7-V! 
zrx;%nrbC+7k=Ps2(3F2Qb+^gKOJlJk$r znIPxa$+<<&uaNT&Me`!vT_&fE9v_hN1v&qToKkv0XB0^lVNhG}_kt~Ts8~GA#hf24}Auw24CFh@!^Sk8yb8;?FEL4RtJb511ba?&)d6F{) z&)5NU>@QwH9c%yAE9+iAwle(2leeFI<@9<*?dv0N?76+?l~c_BjT5&|Sbb}nR`OQj zZ;N+C*;ln*+q_c!cFmm{*;mazU1@t`-|c<)xVdBHrMGX~xgq-+(3Gv+$?8?#Z#BH% zAp4rugPqB|Wc;_p_Yp;-?Ww7KWjcA`?b$oCvaf!_SLuiDgp1Qx=-;>D;Z9wp9}Uf& zzTN(+4G(uD?-8#bqRuAwiJUS&WQw`d*+`GvIneIMxqRF?p0$!LeR5NN}WMwbXAkqE<(O35iBhiMvC;4WR9N$1Fd6xD_-ZQ-Sh zGH;GA&Pb%dso6dICB4mAh^6yMJZ5(0l0=pkn6Y+=)Cbui>e9cVJ1UGO$~8)Nd*OU3 z?8hR0aW;BD`WWMe##3wvV+Zub>)puld13zhzg4He(ifUZ+~G8bSbyN`(1qL?yls0#@_tG{C@w@0xw2|yStMA8GG}KEbrNP`TqbQ CrOG=1 delta 6679 zcmc&&dvKH2mH!@+{mGIn$$Hq9ZTTTT5MW~n7-HU-@JbCBLzp^HA^d&VB9di)`fQA2 z$1qKUOGtSf5?n}FCCd4(+SqvsA5^BFydpbLhJI|b+Q~wOh zh>XYujKS=d?4YT|M9X}@9JI7pf;lZYL2HYZWjJOdBWj*!M6IO1%rnee_@78EHd>c~ zb(s?TP@P>Ani-F7f;9CQppk9cq;J|N`^E404u`rGHKS>R>*DA@I6)EW9 z)Qo0-TZb>8a!rbdS2LTo`+I}FPF35qxi!>*pADf71y*qzuddpBi_e@e;OG+qWH&b2 z`wXq2KuE4HaUVEvVEF_~_*$V$kXSQ}3H312SS_kLpD6D3i*1skW?{BjktLZVZ zzGSW*+E&jNR3Kf=3`@#RsaKIJu_RX^cm#Pa+}yr}T>#dmC7g?aElp?GTDY83S4UXM z^`xIMDQc?}2zb3eVdWa6hKVB9$O}j-dkVA+RPONUYExy@sDt|h10y^Ban%rWfEC&2 zg%*>@JY#z57;h ziXDy~w!v7t8LH1%;oiPt82F)`6=3jhis3zf4rsJ4qX`+&)Xy9=(V7dZOz`KUTC0$% zIcQGh;NFwDa4gToS|N5c7cTmoJQtyP)&y5xDqwBk``7`0ztjxc*PWUHzMqG#i@8v} zI)~>Y4q%tra}A>LGKVtbe!F549FmEuZl0tH zVO{XWJ{M?DS&i5s)j2UM9%luw_gVE*^uK6Ec{r1#B3;00^5MOcdieOX1x{SjX;8ir z6vUNHGHJf0aK&53iDvj>&^EKCjOZC|oU)La{vqFK?q|I2R6iWUO=s12DKRVeEK{dC zI(MnaSU#;=Iqh~k*{yuGZb|dYCLGCLqzO4zz>#-~D>R}5*U9n7Dq(%9ZBhqr_{EC_ z`YCE9nGu@{4~|-=Q4@w&K5N^&+1kRQQ%+pFsc~&&Tzmdy+~jYOjZPD>n@Gx#ExaaD z1x=?*K&P?5$yIvXT_#@JuO)V8)QC=EdC5JvabJ~jPHGgK^&GIAa3yykCl)~QnNoea zKH`D3pE{_NQ?$869==E{qP8?epGQ~Jbj`-jA%=h!5t&bxA0qRzVcWi9WA!w))ic<- zDBE;BvL&i4ri`bkGZ|0Orx;hy&Y1W}nsB}~GZ>dp#_9Sb<5J3aiaL|=6n%=^ce4sN|@;ge;y8f`T{U8+fzsCCbCK5T`GKP3ahOmkt(!6oP+ z9At~tr5fNg>d5Y7%cy$O^vF_bI9sAQX!cXov~sO!+QGDsBt9NnKb{)<;KJ!bZ%p?P z%O7^*N>Qp0!-y3#r-RaxLzX6#ww_1}c+zCxZcAyWe!4z3KB|vHsR3~K%14G@k*dYv z)A`6ss(=roHg*M7&>X6usro5Zq-h6NrAA7sVs)xKQx!NO@zf<3^?2G^jB8S@kpG$) zJHbPC0=tHs9-KIr3XpD`+!pg5qNc^dZjWsEs8efA4=~dNapBgc9d%GkXi_9SgFOqAdhCh`zu5@x_acQ4xqMV(GC_hNzZ6h2TLb>qXCx6bp^grzRZ$IIjJz@9C9P!Q2W1Zd zBk7uroEkA=4t58d{e6;LOltH5N(f-&h*Az>qqPCNHUu$rmr1Cn>S?(F%aW|>lhYsq z&=6S73?;&GoQZNcAT_s3k|LA92WQb4_U`xL1wl3WI}S*A8)%i11RbSgK~CHq5*ko- zR=h|gFEv6Z2KnCoN(cP&%at(k^3n=bBL)46YV7oNhb6p8BxY|2E1}L*Nfu!5D;r?s zl`=b3Z9}`1P>~X9YYRvs#9wjOt2yCTS&}-!?I9)AQ8nVscZ3e5ZZSG}E}4U-4R2Yh zK(&s-g4$oOP}QTp6*&~3;`1n>%7^`Ji5r3rdy#t$NkG~*~kqXLfh>dKhli3^^n;F(Tc9TVoog?Uk79;FtH3I)S! 
zV}dI#xT1pVzM~Kh4eVw=hffDqAR3mbxbH46^e#M_@5t} z;a`8eoWGw}429>&q-lo1kQ(LebEL;e)7z-HMR8$KR9N(_y^>bMFC6wk zUsgfR#+a}vE^LaXesb;AZSQ`>=Kh57uykY4n{Y|OzJm-@r5sonT^p;6Rr)y`&?FHxdp4l5&9Ctr9 z=3X}HUKVrL$KCbU*Tq+C{cQV=`1ZD#y*+MkAKEZ(FvksrV}{C6LuI5VW~hl9YNCc3 z(A*?XHs@|ew$R1POf23vt{Yj^q_FZ?%*)4!STl^|0s}{H&Er%CetK(HAu-$5^rHk8 z$l*`gf4QHBI}L^K8NGkwLQ7 zOp%!x*$iY7I=;gY)Dh&fL6PiA;t-XZUr`@o#=IT|ZTq+Xxco!YSCQ^EP<0dyA=oZJh{4@E(%OXuNOI6%b zHIy+vORJpRbDlGv;mwh*SdJ&2;~B~vcNCoSpYcbGBV94a!nkAMkbXSddeVQ~Kdc>A zV%g>K?D8T0o(REOcQHF#qG zb_{pic!3Z#tiT^01|N;DW_2T|1$3J|44Blqiaw%2vH~7$Qu#f;K)0mog1%la&B}cY z98?SO-SwEdl=~wCH`FEUefXogdys55Po7L}UGho-ggZ7lAMV1wqy9 z4}}8qbEJHPz!d_A2~aOhqHZ}zfVhA8B!PgsZ#1icb diff --git a/qt_app_pyside1/ui/__pycache__/violations_tab.cpython-311.pyc b/qt_app_pyside1/ui/__pycache__/violations_tab.cpython-311.pyc index b66c6ce11939cde3d993edfbd7decaa2ee52f9f1..fc101dc76e008def160b911ca3dae02841874099 100644 GIT binary patch delta 93 zcmeyngt2caBlmJ%UM>b82vJ?Jk$VEOW2m!LOlWaxQE^O4YFTnlerZaKOKNd;Nq#|$ tTV`HjPHIeYPHJMFZW@TATToh@5mQ(apIA^3Urb8P@NvLk$VEOfs2b(j7xrbUQT{uN^wkKNqk~KL3}}Fab`+t SOm;?MNn$Rl;O0BboBRONaU2Z* diff --git a/qt_app_pyside1/ui/analytics_tab.py b/qt_app_pyside1/ui/analytics_tab.py index 4a2c6b3..e4e8941 100644 --- a/qt_app_pyside1/ui/analytics_tab.py +++ b/qt_app_pyside1/ui/analytics_tab.py @@ -1,662 +1,563 @@ from PySide6.QtWidgets import ( QWidget, QVBoxLayout, QHBoxLayout, QLabel, - QGroupBox, QPushButton, QScrollArea, QSplitter + QPushButton, QTabWidget, QTableWidget, QTableWidgetItem, QHeaderView ) from PySide6.QtCore import Qt, Slot -from PySide6.QtCharts import QChart, QChartView, QLineSeries, QPieSeries, QBarSeries, QBarSet, QBarCategoryAxis, QScatterSeries, QValueAxis -from PySide6.QtGui import QPainter, QColor, QPen, QFont, QBrush, QLinearGradient, QGradient +from PySide6.QtGui import QColor, QFont -class ChartWidget(QWidget): - """Base widget for analytics charts""" - def __init__(self, title): - super().__init__() - self.layout = QVBoxLayout(self) - self.layout.setContentsMargins(0, 0, 0, 0) - - # Chart title - self.title_label = QLabel(title) - self.title_label.setAlignment(Qt.AlignCenter) - self.title_label.setStyleSheet("font-weight: bold; font-size: 14px;") - self.layout.addWidget(self.title_label) - - # Create chart - self.chart = QChart() - self.chart.setAnimationOptions(QChart.SeriesAnimations) - self.chart.setBackgroundBrush(QBrush(QColor(240, 240, 240))) - self.chart.legend().setVisible(True) - self.chart.legend().setAlignment(Qt.AlignBottom) - - # Chart view - self.chartview = QChartView(self.chart) - self.chartview.setRenderHint(QPainter.RenderHint.Antialiasing) - self.layout.addWidget(self.chartview) - - self.setMinimumSize(400, 300) -class TimeSeriesChart(ChartWidget): - """Time series chart for traffic data""" - def __init__(self, title="Traffic Over Time"): - super().__init__(title) - - # Create series - self.vehicle_series = QLineSeries() - self.vehicle_series.setName("Vehicles") - self.vehicle_series.setPen(QPen(QColor(0, 162, 232), 2)) - - self.pedestrian_series = QLineSeries() - self.pedestrian_series.setName("Pedestrians") - self.pedestrian_series.setPen(QPen(QColor(255, 140, 0), 2)) - - self.violation_series = QLineSeries() - self.violation_series.setName("Violations") - self.violation_series.setPen(QPen(QColor(232, 0, 0), 2)) - - self.traffic_light_color_series = QLineSeries() - self.traffic_light_color_series.setName("Traffic Light Color") - self.traffic_light_color_series.setPen(QPen(QColor(128, 0, 128), 2, Qt.DashLine)) - - # Add series to chart - 
self.chart.addSeries(self.vehicle_series) - self.chart.addSeries(self.pedestrian_series) - self.chart.addSeries(self.violation_series) - self.chart.addSeries(self.traffic_light_color_series) - - # Create and configure axes - self.chart.createDefaultAxes() - x_axis = self.chart.axes(Qt.Horizontal)[0] - x_axis.setTitleText("Time") - x_axis.setGridLineVisible(True) - x_axis.setLabelsAngle(45) - - y_axis = self.chart.axes(Qt.Vertical)[0] - y_axis.setTitleText("Count") - y_axis.setGridLineVisible(True) - - def update_data(self, time_series): - """Update chart with new time series data""" - try: - if not time_series or 'timestamps' not in time_series: - return - - # Check if chart and series are still valid - if not hasattr(self, 'chart') or self.chart is None: - return - if not hasattr(self, 'vehicle_series') or self.vehicle_series is None: - return - - timestamps = time_series.get('timestamps', []) - vehicle_counts = time_series.get('vehicle_counts', []) - pedestrian_counts = time_series.get('pedestrian_counts', []) - violation_counts = time_series.get('violation_counts', []) - traffic_light_colors = time_series.get('traffic_light_colors', []) - - # Clear existing series safely - try: - self.vehicle_series.clear() - self.pedestrian_series.clear() - self.violation_series.clear() - self.traffic_light_color_series.clear() - except RuntimeError: - # C++ object was already deleted, skip update - return - - # Add data points - for i in range(len(timestamps)): - try: - # Add x as index, y as count - self.vehicle_series.append(i, vehicle_counts[i] if i < len(vehicle_counts) else 0) - self.pedestrian_series.append(i, pedestrian_counts[i] if i < len(pedestrian_counts) else 0) - self.violation_series.append(i, violation_counts[i] if i < len(violation_counts) else 0) - - # Add traffic light color as mapped int for charting (0=unknown, 1=red, 2=yellow, 3=green) - if i < len(traffic_light_colors): - color_map = {'unknown': 0, 'red': 1, 'yellow': 2, 'green': 3} - color_val = color_map.get(traffic_light_colors[i], 0) - self.traffic_light_color_series.append(i, color_val) - except RuntimeError: - # C++ object was deleted during update - return - - # Update axes safely - try: - axes = self.chart.axes(Qt.Horizontal) - if axes: - axes[0].setRange(0, max(len(timestamps)-1, 10)) - - max_count = max( - max(vehicle_counts) if vehicle_counts else 0, - max(pedestrian_counts) if pedestrian_counts else 0, - max(violation_counts) if violation_counts else 0 - ) - axes = self.chart.axes(Qt.Vertical) - if axes: - axes[0].setRange(0, max(max_count+1, 5)) - except (RuntimeError, IndexError): - # Chart axes were deleted or not available - pass - - # Optionally, set y-axis label for traffic light color - axes = self.chart.axes(Qt.Vertical) - if axes: - axes[0].setTitleText("Count / TL Color (0=U,1=R,2=Y,3=G)") - except Exception as e: - print(f"[WARNING] Chart update failed: {e}") - -class DetectionPieChart(ChartWidget): - """Pie chart for detected object classes""" - def __init__(self, title="Detection Classes"): - super().__init__(title) - - self.pie_series = QPieSeries() - self.chart.addSeries(self.pie_series) - - def update_data(self, detection_counts): - """Update chart with detection counts""" - try: - if not detection_counts: - return - - # Check if chart and series are still valid - if not hasattr(self, 'chart') or self.chart is None: - return - if not hasattr(self, 'pie_series') or self.pie_series is None: - return - - # Clear existing slices safely - try: - self.pie_series.clear() - except RuntimeError: - # C++ object 
was already deleted, skip update - return - - # Add new slices - for class_name, count in detection_counts.items(): - # Only add if count > 0 - if count > 0: - try: - slice = self.pie_series.append(class_name, count) - - # Set colors based on class - if class_name.lower() == 'car': - slice.setBrush(QColor(0, 200, 0)) - elif class_name.lower() == 'person': - slice.setBrush(QColor(255, 165, 0)) - elif class_name.lower() == 'truck': - slice.setBrush(QColor(0, 100, 200)) - elif class_name.lower() == 'bus': - slice.setBrush(QColor(200, 0, 100)) - - # Highlight important slices - if count > 10: - slice.setExploded(True) - slice.setLabelVisible(True) - except RuntimeError: - # C++ object was deleted during update - return - except Exception as e: - print(f"[WARNING] Pie chart update failed: {e}") - -class ViolationBarChart(ChartWidget): - """Bar chart for violation types""" - def __init__(self, title="Violations by Type"): - super().__init__(title) - - # Create series - self.bar_series = QBarSeries() - self.chart.addSeries(self.bar_series) - - # Create axes - self.axis_x = QBarCategoryAxis() - self.chart.addAxis(self.axis_x, Qt.AlignBottom) - self.bar_series.attachAxis(self.axis_x) - - self.chart.createDefaultAxes() - self.chart.axes(Qt.Vertical)[0].setTitleText("Count") - - def update_data(self, violation_counts): - """Update chart with violation counts""" - try: - if not violation_counts: - return - - # Check if chart and series are still valid - if not hasattr(self, 'chart') or self.chart is None: - return - if not hasattr(self, 'bar_series') or self.bar_series is None: - return - if not hasattr(self, 'axis_x') or self.axis_x is None: - return - - # Clear existing data safely - try: - self.bar_series.clear() - except RuntimeError: - # C++ object was already deleted, skip update - return - - # Create bar set - bar_set = QBarSet("Violations") - - # Set colors - try: - bar_set.setColor(QColor(232, 0, 0)) - except RuntimeError: - return - - # Add values - values = [] - categories = [] - - for violation_type, count in violation_counts.items(): - if count > 0: - values.append(count) - # Format violation type for display - display_name = violation_type.replace('_', ' ').title() - categories.append(display_name) - - if values: - try: - bar_set.append(values) - self.bar_series.append(bar_set) - - # Update x-axis categories - self.axis_x.setCategories(categories) - - # Update y-axis range - y_axes = self.chart.axes(Qt.Vertical) - if y_axes: - y_axes[0].setRange(0, max(values) * 1.2) - except RuntimeError: - # C++ object was deleted during update - return - except Exception as e: - print(f"[WARNING] Bar chart update failed: {e}") - -class LatencyChartWidget(ChartWidget): - """Custom latency chart with spikes, device/res changes, and live stats legend.""" - def __init__(self, title="Inference Latency Over Time"): - super().__init__(title) - self.chart.setBackgroundBrush(QBrush(QColor(24, 28, 32))) - self.title_label.setStyleSheet("font-weight: bold; font-size: 16px; color: #fff;") - self.chart.legend().setVisible(False) - # Main latency line - self.latency_series = QLineSeries() - self.latency_series.setName("Latency (ms)") - self.latency_series.setPen(QPen(QColor(0, 255, 255), 2)) - self.chart.addSeries(self.latency_series) - # Spikes as red dots - self.spike_series = QScatterSeries() - self.spike_series.setName("Spikes") - self.spike_series.setMarkerSize(8) - self.spike_series.setColor(QColor(255, 64, 64)) - self.chart.addSeries(self.spike_series) - # Device/resolution change lines (vertical) - 
self.event_lines = [] - # Axes - self.chart.createDefaultAxes() - self.x_axis = self.chart.axes(Qt.Horizontal)[0] - self.x_axis.setTitleText("") - self.x_axis.setLabelsColor(QColor("#fff")) - self.x_axis.setGridLineColor(QColor("#444")) - self.y_axis = self.chart.axes(Qt.Vertical)[0] - self.y_axis.setTitleText("ms") - self.y_axis.setLabelsColor(QColor("#fff")) - self.y_axis.setGridLineColor(QColor("#444")) - # Stats label - self.stats_label = QLabel() - self.stats_label.setStyleSheet("color: #00e6ff; font-size: 13px; font-weight: bold; margin: 2px 0 0 8px;") - self.layout.addWidget(self.stats_label) - - def update_data(self, latency_data): - """ - latency_data: dict with keys: - 'latencies': list of float, - 'spike_indices': list of int, - 'device_switches': list of int, - 'resolution_changes': list of int - """ - if not latency_data or 'latencies' not in latency_data: - return - latencies = latency_data.get('latencies', []) - spikes = set(latency_data.get('spike_indices', [])) - device_switches = set(latency_data.get('device_switches', [])) - res_changes = set(latency_data.get('resolution_changes', [])) - # Clear series - self.latency_series.clear() - self.spike_series.clear() - # Remove old event lines - for line in self.event_lines: - self.chart.removeAxis(line) - self.event_lines = [] - # Plot latency and spikes - for i, val in enumerate(latencies): - self.latency_series.append(i, val) - if i in spikes: - self.spike_series.append(i, val) - # Add device/resolution change lines - for idx in device_switches: - line = QLineSeries() - line.setPen(QPen(QColor(33, 150, 243), 3)) # Blue - line.append(idx, min(latencies) if latencies else 0) - line.append(idx, max(latencies) if latencies else 1) - self.chart.addSeries(line) - line.attachAxis(self.x_axis) - line.attachAxis(self.y_axis) - self.event_lines.append(line) - for idx in res_changes: - line = QLineSeries() - line.setPen(QPen(QColor(255, 167, 38), 3)) # Orange - line.append(idx, min(latencies) if latencies else 0) - line.append(idx, max(latencies) if latencies else 1) - self.chart.addSeries(line) - line.attachAxis(self.x_axis) - line.attachAxis(self.y_axis) - self.event_lines.append(line) - # Update axes - self.x_axis.setRange(0, max(len(latencies)-1, 10)) - self.y_axis.setRange(0, max(max(latencies) if latencies else 1, 10)) - # Stats - if latencies: - avg = sum(latencies)/len(latencies) - mx = max(latencies) - self.stats_label.setText(f"Avg: {avg:.1f}ms | Max: {mx:.1f}ms | Spikes: {len(spikes)}") - else: - self.stats_label.setText("") - -class FPSChartWidget(ChartWidget): - """FPS & Resolution Impact chart with device/resolution change lines and live stats.""" - def __init__(self, title="FPS & Resolution Impact"): - super().__init__(title) - self.chart.setBackgroundBrush(QBrush(QColor(24, 28, 32))) - self.title_label.setStyleSheet("font-weight: bold; font-size: 16px; color: #fff;") - self.chart.legend().setVisible(False) - self.fps_series = QLineSeries() - self.fps_series.setName("FPS") - self.fps_series.setPen(QPen(QColor(0, 255, 255), 2)) - self.chart.addSeries(self.fps_series) - self.event_lines = [] - self.chart.createDefaultAxes() - self.x_axis = self.chart.axes(Qt.Horizontal)[0] - self.x_axis.setLabelsColor(QColor("#fff")) - self.x_axis.setGridLineColor(QColor("#444")) - self.y_axis = self.chart.axes(Qt.Vertical)[0] - self.y_axis.setTitleText("FPS") - self.y_axis.setLabelsColor(QColor("#fff")) - self.y_axis.setGridLineColor(QColor("#444")) - self.stats_label = QLabel() - self.stats_label.setStyleSheet("color: #00ff82; font-size: 
13px; font-weight: bold; margin: 2px 0 0 8px;") - self.layout.addWidget(self.stats_label) - def update_data(self, fps_data): - if not fps_data or 'fps' not in fps_data: - return - fps = fps_data.get('fps', []) - device_switches = set(fps_data.get('device_switches', [])) - res_changes = set(fps_data.get('resolution_changes', [])) - device_labels = fps_data.get('device_labels', {}) - res_labels = fps_data.get('resolution_labels', {}) - self.fps_series.clear() - for line in self.event_lines: - self.chart.removeAxis(line) - self.event_lines = [] - for i, val in enumerate(fps): - self.fps_series.append(i, val) - for idx in device_switches: - line = QLineSeries() - line.setPen(QPen(QColor(33, 150, 243), 3)) - line.append(idx, min(fps) if fps else 0) - line.append(idx, max(fps) if fps else 1) - self.chart.addSeries(line) - line.attachAxis(self.x_axis) - line.attachAxis(self.y_axis) - self.event_lines.append(line) - for idx in res_changes: - line = QLineSeries() - line.setPen(QPen(QColor(255, 167, 38), 3)) - line.append(idx, min(fps) if fps else 0) - line.append(idx, max(fps) if fps else 1) - self.chart.addSeries(line) - line.attachAxis(self.x_axis) - line.attachAxis(self.y_axis) - self.event_lines.append(line) - self.x_axis.setRange(0, max(len(fps)-1, 10)) - self.y_axis.setRange(0, max(max(fps) if fps else 1, 10)) - # Live stats (current FPS, resolution, device) - cur_fps = fps[-1] if fps else 0 - cur_res = res_labels.get(len(fps)-1, "-") - cur_dev = device_labels.get(len(fps)-1, "-") - self.stats_label.setText(f"Current FPS: {cur_fps:.1f} | Resolution: {cur_res} | Device: {cur_dev}") - -class DeviceSwitchChartWidget(ChartWidget): - """Device Switching & Resolution Changes chart with colored vertical lines and legend.""" - def __init__(self, title="Device Switching & Resolution Changes"): - super().__init__(title) - self.chart.setBackgroundBrush(QBrush(QColor(24, 28, 32))) - self.title_label.setStyleSheet("font-weight: bold; font-size: 16px; color: #fff;") - self.chart.legend().setVisible(False) - self.event_lines = [] - self.chart.createDefaultAxes() - self.x_axis = self.chart.axes(Qt.Horizontal)[0] - self.x_axis.setLabelsColor(QColor("#fff")) - self.x_axis.setGridLineColor(QColor("#444")) - self.y_axis = self.chart.axes(Qt.Vertical)[0] - self.y_axis.setTitleText("-") - self.y_axis.setLabelsColor(QColor("#fff")) - self.y_axis.setGridLineColor(QColor("#444")) - self.legend_label = QLabel() - self.legend_label.setStyleSheet("color: #ffb300; font-size: 13px; font-weight: bold; margin: 2px 0 0 8px;") - self.layout.addWidget(self.legend_label) - def update_data(self, event_data): - if not event_data: - return - cpu_spikes = set(event_data.get('cpu_spikes', [])) - gpu_spikes = set(event_data.get('gpu_spikes', [])) - switches = set(event_data.get('switches', [])) - res_changes = set(event_data.get('res_changes', [])) - n = event_data.get('n', 100) - for line in self.event_lines: - self.chart.removeAxis(line) - self.event_lines = [] - for idx in cpu_spikes: - line = QLineSeries() - line.setPen(QPen(QColor(255, 64, 64), 2)) - line.append(idx, 0) - line.append(idx, 1) - self.chart.addSeries(line) - line.attachAxis(self.x_axis) - line.attachAxis(self.y_axis) - self.event_lines.append(line) - for idx in gpu_spikes: - line = QLineSeries() - line.setPen(QPen(QColor(255, 87, 34), 2)) - line.append(idx, 0) - line.append(idx, 1) - self.chart.addSeries(line) - line.attachAxis(self.x_axis) - line.attachAxis(self.y_axis) - self.event_lines.append(line) - for idx in switches: - line = QLineSeries() - 
line.setPen(QPen(QColor(33, 150, 243), 2)) - line.append(idx, 0) - line.append(idx, 1) - self.chart.addSeries(line) - line.attachAxis(self.x_axis) - line.attachAxis(self.y_axis) - self.event_lines.append(line) - for idx in res_changes: - line = QLineSeries() - line.setPen(QPen(QColor(255, 167, 38), 2)) - line.append(idx, 0) - line.append(idx, 1) - self.chart.addSeries(line) - line.attachAxis(self.x_axis) - line.attachAxis(self.y_axis) - self.event_lines.append(line) - self.x_axis.setRange(0, n) - self.y_axis.setRange(0, 1) - self.legend_label.setText("CPU Spikes: {} | GPU Spikes: {} | Switches: {} | Res Changes: {}".format(len(cpu_spikes), len(gpu_spikes), len(switches), len(res_changes))) - -class AnalyticsTab(QWidget): - """Analytics tab with charts and statistics""" +class CleanAnalyticsWidget(QWidget): + """Clean and minimal analytics widget with tabbed interface""" def __init__(self): super().__init__() - self.initUI() + # Data storage for real-time updates + self.latest_traffic_lights = [] + self.latest_violations = [] + self.latest_vehicles = [] + self.latest_frame_data = {} + self.init_ui() - def initUI(self): - """Initialize UI components""" - main_layout = QVBoxLayout(self) + def init_ui(self): + """Initialize the clean UI with tabs""" + layout = QVBoxLayout(self) + layout.setContentsMargins(10, 10, 10, 10) + layout.setSpacing(10) - # Add notice that violations are disabled - notice_label = QLabel("⚠️ Violation detection is currently disabled. Only object detection statistics will be shown.") - notice_label.setStyleSheet("font-size: 14px; color: #FFA500; font-weight: bold; padding: 10px;") - notice_label.setAlignment(Qt.AlignCenter) - main_layout.addWidget(notice_label) + # Set dark background for the main widget + self.setStyleSheet(""" + QWidget { + background-color: #2C3E50; + color: #FFFFFF; + } + """) - # Charts section - charts_splitter = QSplitter(Qt.Horizontal) + # Title + title_label = QLabel("🚦 Traffic Intersection Monitor") + title_label.setStyleSheet(""" + QLabel { + font-size: 20px; + font-weight: bold; + color: #FFFFFF; + font-family: 'Roboto', Arial, sans-serif; + padding: 15px; + background: qlineargradient(x1:0, y1:0, x2:1, y2:0, + stop:0 #2C3E50, stop:1 #34495E); + border-radius: 8px; + border: 1px solid #34495E; + } + """) + title_label.setAlignment(Qt.AlignCenter) + layout.addWidget(title_label) - # Latency chart (top, full width) - self.latency_chart = LatencyChartWidget("Inference Latency Over Time") - main_layout.addWidget(self.latency_chart) + # Create tab widget + self.tab_widget = QTabWidget() + self.tab_widget.setStyleSheet(""" + QTabWidget::pane { + border: 1px solid #34495E; + border-radius: 8px; + background-color: #2C3E50; + } + QTabBar::tab { + background: #34495E; + color: #FFFFFF; + padding: 12px 20px; + margin-right: 2px; + border-top-left-radius: 8px; + border-top-right-radius: 8px; + font-family: 'Roboto', Arial, sans-serif; + font-weight: 500; + min-width: 120px; + } + QTabBar::tab:selected { + background: #3498DB; + color: white; + } + QTabBar::tab:hover:!selected { + background: #2C3E50; + } + """) - # Left side - Time series chart - self.time_series_chart = TimeSeriesChart("Traffic Over Time") - charts_splitter.addWidget(self.time_series_chart) + # Create tabs + self.create_traffic_light_tab() + self.create_violation_tab() + self.create_vehicle_tab() - # Right side - Detection and violation charts - right_charts = QWidget() - right_layout = QVBoxLayout(right_charts) + layout.addWidget(self.tab_widget) - self.detection_chart = 
DetectionPieChart("Detection Classes") - self.violation_chart = ViolationBarChart("Violations by Type") + # Refresh button + refresh_btn = QPushButton("🔄 Refresh Data") + refresh_btn.setStyleSheet(""" + QPushButton { + background: qlineargradient(x1:0, y1:0, x2:0, y2:1, + stop:0 #3498DB, stop:1 #2980B9); + color: white; + border: none; + padding: 12px 24px; + border-radius: 6px; + font-weight: bold; + font-family: 'Roboto', Arial, sans-serif; + font-size: 14px; + } + QPushButton:hover { + background: qlineargradient(x1:0, y1:0, x2:0, y2:1, + stop:0 #5DADE2, stop:1 #3498DB); + } + QPushButton:pressed { + background: qlineargradient(x1:0, y1:0, x2:0, y2:1, + stop:0 #2980B9, stop:1 #21618C); + } + """) + refresh_btn.clicked.connect(self.refresh_all_data) - right_layout.addWidget(self.detection_chart) - right_layout.addWidget(self.violation_chart) + # Center the button + button_layout = QHBoxLayout() + button_layout.addStretch() + button_layout.addWidget(refresh_btn) + button_layout.addStretch() + layout.addLayout(button_layout) - charts_splitter.addWidget(right_charts) - charts_splitter.setSizes([500, 500]) # Equal initial sizes + def create_traffic_light_tab(self): + """Create traffic light status tab""" + tab = QWidget() + layout = QVBoxLayout(tab) + layout.setContentsMargins(15, 15, 15, 15) - main_layout.addWidget(charts_splitter) + # Table + self.traffic_table = QTableWidget(0, 5) + self.traffic_table.setHorizontalHeaderLabels([ + "Detection", "Red Ratio", "Yellow Ratio", "Green Ratio", "Status" + ]) - # Key metrics section - metrics_box = QGroupBox("Key Metrics") - metrics_layout = QHBoxLayout(metrics_box) + # Apply clean table styling + self.apply_table_style(self.traffic_table) - # Vehicle metrics - vehicle_metrics = QGroupBox("Traffic") - vehicle_layout = QVBoxLayout(vehicle_metrics) - self.total_vehicles_label = QLabel("Total Vehicles: 0") - self.total_pedestrians_label = QLabel("Total Pedestrians: 0") - vehicle_layout.addWidget(self.total_vehicles_label) - vehicle_layout.addWidget(self.total_pedestrians_label) - metrics_layout.addWidget(vehicle_metrics) + # Start with empty table - no sample data + layout.addWidget(self.traffic_table) - # Violation metrics - violation_metrics = QGroupBox("Violations") - violation_layout = QVBoxLayout(violation_metrics) - self.total_violations_label = QLabel("Total Violations: 0") - self.peak_violation_label = QLabel("Peak Violation Hour: --") - violation_layout.addWidget(self.total_violations_label) - violation_layout.addWidget(self.peak_violation_label) - metrics_layout.addWidget(violation_metrics) + self.tab_widget.addTab(tab, "🚦 Traffic Lights") - # Performance metrics - performance_metrics = QGroupBox("Performance") - performance_layout = QVBoxLayout(performance_metrics) - self.avg_fps_label = QLabel("Avg FPS: 0") - self.avg_processing_label = QLabel("Avg Processing Time: 0 ms") - performance_layout.addWidget(self.avg_fps_label) - performance_layout.addWidget(self.avg_processing_label) - metrics_layout.addWidget(performance_metrics) + def create_violation_tab(self): + """Create violation summary tab""" + tab = QWidget() + layout = QVBoxLayout(tab) + layout.setContentsMargins(15, 15, 15, 15) - main_layout.addWidget(metrics_box) + # Table + self.violation_table = QTableWidget(0, 3) + self.violation_table.setHorizontalHeaderLabels([ + "Track ID", "Violation Type", "Status" + ]) - # Controls - controls = QHBoxLayout() - self.reset_btn = QPushButton("Reset Statistics") - controls.addWidget(self.reset_btn) - controls.addStretch(1) # Push button to 
left + # Apply clean table styling + self.apply_table_style(self.violation_table) - main_layout.addLayout(controls) + # Start with empty table - no sample data + layout.addWidget(self.violation_table) + + self.tab_widget.addTab(tab, "🚨 Violations") + + def create_vehicle_tab(self): + """Create vehicle tracking status tab""" + tab = QWidget() + layout = QVBoxLayout(tab) + layout.setContentsMargins(15, 15, 15, 15) + + # Table + self.vehicle_table = QTableWidget(0, 6) + self.vehicle_table.setHorizontalHeaderLabels([ + "Track ID", "Position (x,y)", "Center Y", "Moving", "Violating", "Status" + ]) + + # Apply clean table styling + self.apply_table_style(self.vehicle_table) + + # Start with empty table - no sample data + layout.addWidget(self.vehicle_table) + + self.tab_widget.addTab(tab, "🚗 Vehicles") + + def apply_table_style(self, table): + """Apply consistent styling to tables""" + # Set font + font = QFont("Roboto", 10) + table.setFont(font) + + # Header styling + table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) + table.horizontalHeader().setStyleSheet(""" + QHeaderView::section { + background: qlineargradient(x1:0, y1:0, x2:0, y2:1, + stop:0 #1A252F, stop:1 #2C3E50); + color: #FFFFFF; + padding: 10px; + border: 1px solid #2C3E50; + font-weight: bold; + font-family: 'Roboto', Arial, sans-serif; + } + """) + + # Table styling + table.setStyleSheet(""" + QTableWidget { + gridline-color: #34495E; + background-color: #2C3E50; + alternate-background-color: #34495E; + selection-background-color: #3498DB; + border: 1px solid #34495E; + border-radius: 6px; + color: #FFFFFF; + } + QTableWidget::item { + padding: 8px; + border-bottom: 1px solid #34495E; + color: #FFFFFF; + } + QTableWidget::item:selected { + background-color: #3498DB; + color: #FFFFFF; + } + """) + + # Enable alternating row colors + table.setAlternatingRowColors(True) + + # Set selection behavior + table.setSelectionBehavior(QTableWidget.SelectRows) + + def populate_table(self, table, data, table_type): + """Populate table with data and apply color coding for dark theme""" + table.setRowCount(len(data)) + + for i, row in enumerate(data): + for j, item in enumerate(row): + cell = QTableWidgetItem(str(item)) + cell.setForeground(QColor(255, 255, 255)) # White text + + # Apply color coding based on content for dark theme + if table_type == "traffic_light": + if "🔴" in str(item): + cell.setBackground(QColor(139, 69, 19)) # Dark red/brown + elif "🟡" in str(item): + cell.setBackground(QColor(184, 134, 11)) # Dark yellow + elif "🟢" in str(item): + cell.setBackground(QColor(34, 139, 34)) # Dark green + + elif table_type == "violation": + if "Active" in str(item) or "🚨" in str(item): + cell.setBackground(QColor(139, 69, 19)) # Dark red/brown + cell.setForeground(QColor(255, 255, 255)) # White text + elif "Detected" in str(item): + cell.setBackground(QColor(205, 133, 63)) # Dark orange + cell.setForeground(QColor(255, 255, 255)) # White text + + elif table_type == "vehicle": + if "🔴" in str(item) or ("True" in str(item) and j == 4): # Violating column + cell.setBackground(QColor(139, 69, 19)) # Dark red/brown + cell.setForeground(QColor(255, 255, 255)) # White text + elif "🟢" in str(item): + cell.setBackground(QColor(34, 139, 34)) # Dark green + cell.setForeground(QColor(255, 255, 255)) # White text + + table.setItem(i, j, cell) + + def refresh_all_data(self): + """Refresh all tables with latest data""" + print("🔄 Refreshing analytics data...") + self.update_traffic_lights_table() + self.update_violations_table() + 
self.update_vehicles_table() @Slot(dict) - def update_analytics(self, analytics): - """ - Update analytics display with new data. - - Args: - analytics: Dictionary of analytics data - """ + def update_detection_data(self, detection_data): + """Update analytics with detection data from video tab""" try: - if not analytics: - return - - # Update latency chart - try: - if hasattr(self, 'latency_chart') and self.latency_chart is not None: - self.latency_chart.update_data(analytics.get('latency', {})) - except Exception as e: - print(f"[WARNING] Latency chart update failed: {e}") + print(f"[ANALYTICS UPDATE] Received detection data with keys: {list(detection_data.keys())}") + self.latest_frame_data = detection_data - # Update charts with error handling - try: - if hasattr(self, 'time_series_chart') and self.time_series_chart is not None: - self.time_series_chart.update_data(analytics.get('time_series', {})) - except Exception as e: - print(f"[WARNING] Time series chart update failed: {e}") - - try: - if hasattr(self, 'detection_chart') and self.detection_chart is not None: - self.detection_chart.update_data(analytics.get('detection_counts', {})) - except Exception as e: - print(f"[WARNING] Detection chart update failed: {e}") - - try: - if hasattr(self, 'violation_chart') and self.violation_chart is not None: - self.violation_chart.update_data(analytics.get('violation_counts', {})) - except Exception as e: - print(f"[WARNING] Violation chart update failed: {e}") + # Extract traffic lights + detections = detection_data.get('detections', []) + traffic_lights = [] + vehicles = [] - # Update metrics - try: - metrics = analytics.get('metrics', {}) - - if hasattr(self, 'total_vehicles_label'): - self.total_vehicles_label.setText(f"Total Vehicles: {metrics.get('total_vehicles', 0)}") - if hasattr(self, 'total_pedestrians_label'): - self.total_pedestrians_label.setText(f"Total Pedestrians: {metrics.get('total_pedestrians', 0)}") - - if hasattr(self, 'total_violations_label'): - self.total_violations_label.setText(f"Total Violations: {metrics.get('total_violations', 0)}") - - peak_hour = metrics.get('peak_violation_hour') - if peak_hour: - peak_text = f"Peak Violation Hour: {peak_hour.get('time', '--')} ({peak_hour.get('violations', 0)})" + for detection in detections: + if hasattr(detection, 'label'): + label = detection.label + elif isinstance(detection, dict): + label = detection.get('label', detection.get('class', detection.get('class_name', ''))) else: - peak_text = "Peak Violation Hour: --" - if hasattr(self, 'peak_violation_label'): - self.peak_violation_label.setText(peak_text) + label = str(detection) - if hasattr(self, 'avg_fps_label'): - self.avg_fps_label.setText(f"Avg FPS: {metrics.get('avg_fps', 0):.1f}") - if hasattr(self, 'avg_processing_label'): - self.avg_processing_label.setText( - f"Avg Processing Time: {metrics.get('avg_processing_time', 0):.1f} ms" - ) - except Exception as e: - print(f"[WARNING] Metrics update failed: {e}") + if 'traffic light' in str(label).lower(): + traffic_lights.append(detection) + elif any(vehicle_type in str(label).lower() for vehicle_type in ['car', 'truck', 'bus', 'motorcycle']): + vehicles.append(detection) + + self.latest_traffic_lights = traffic_lights + + # Extract vehicle tracking data - Handle the EXACT structure from video controller + tracked_vehicles = detection_data.get('tracked_vehicles', []) + print(f"[ANALYTICS UPDATE] Found {len(tracked_vehicles)} tracked vehicles") + + # Process tracked vehicles with the correct structure + 
processed_vehicles = [] + for vehicle in tracked_vehicles: + print(f"[ANALYTICS UPDATE] Raw vehicle data: {vehicle}") - # Update traffic light label with latest color - try: - tl_series = analytics.get('traffic_light_color_series', []) - if tl_series: - latest = tl_series[-1][1] - self.traffic_light_label.setText(f"Traffic Light: {latest.title()}") + # Handle the actual structure: {id, bbox, center_y, is_moving, is_violation} + if isinstance(vehicle, dict): + track_id = vehicle.get('id', 'Unknown') + bbox = vehicle.get('bbox', [0, 0, 0, 0]) + center_y = vehicle.get('center_y', 0) + moving = vehicle.get('is_moving', False) + violating = vehicle.get('is_violation', False) + + # Calculate center_x from bbox + if len(bbox) >= 4: + center_x = (bbox[0] + bbox[2]) / 2 + else: + center_x = 0 + else: - self.traffic_light_label.setText("Traffic Light: Unknown") - except Exception as e: - print(f"[WARNING] Traffic light label update failed: {e}") + # Fallback for other object types + track_id = getattr(vehicle, 'id', getattr(vehicle, 'track_id', 'Unknown')) + bbox = getattr(vehicle, 'bbox', [0, 0, 0, 0]) + center_y = getattr(vehicle, 'center_y', 0) + moving = getattr(vehicle, 'is_moving', getattr(vehicle, 'moving', False)) + violating = getattr(vehicle, 'is_violation', getattr(vehicle, 'violating', False)) + + if len(bbox) >= 4: + center_x = (bbox[0] + bbox[2]) / 2 + else: + center_x = 0 + processed_vehicles.append({ + 'track_id': track_id, + 'center': (center_x, center_y), + 'moving': moving, + 'violating': violating + }) + + print(f"[ANALYTICS UPDATE] Processed vehicle ID={track_id}, center=({center_x:.1f}, {center_y:.1f}), moving={moving}, violating={violating}") + + self.latest_vehicles = processed_vehicles + print(f"[ANALYTICS UPDATE] Stored {len(self.latest_vehicles)} processed vehicles") + + # Update tables with new data + self.update_traffic_lights_table() + self.update_vehicles_table() + except Exception as e: - print(f"[ERROR] Analytics update failed: {e}") + print(f"Error updating detection data: {e}") + import traceback + traceback.print_exc() + + @Slot(dict) + def update_violation_data(self, violation_data): + """Update violations data""" + try: + # Store violation data + track_id = violation_data.get('track_id') + violation_type = violation_data.get('type', 'Unknown') + + # Add to violations list if not already present + existing = [v for v in self.latest_violations if v.get('track_id') == track_id] + if not existing: + self.latest_violations.append({ + 'track_id': track_id, + 'type': violation_type, + 'status': 'Active', + 'timestamp': violation_data.get('timestamp', '') + }) + + self.update_violations_table() + + except Exception as e: + print(f"Error updating violation data: {e}") + + def update_traffic_lights_table(self): + """Update traffic lights table with latest data""" + try: + data = [] + + # Check if we have traffic light data from frame analysis + latest_traffic_light = self.latest_frame_data.get('traffic_light', {}) + if latest_traffic_light: + # Extract traffic light info + color = latest_traffic_light.get('color', 'unknown') + confidence = latest_traffic_light.get('confidence', 0.0) + + # Create traffic light entries based on the detected signal + if color == 'red': + status = "🔴 Red" + red_ratio = confidence + yellow_ratio = 0.0 + green_ratio = 0.0 + elif color == 'yellow': + status = "🟡 Yellow" + red_ratio = 0.0 + yellow_ratio = confidence + green_ratio = 0.0 + elif color == 'green': + status = "🟢 Green" + red_ratio = 0.0 + yellow_ratio = 0.0 + green_ratio = confidence 
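                # Note (illustrative annotation): the detected colour's ratio is reported as the
                # detection confidence while the other two ratios stay at 0.0; populate_table()
                # later colours the row from the 🔴/🟡/🟢 marker carried in the Status column.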
+ else: + status = "❓ Unknown" + red_ratio = 0.0 + yellow_ratio = 0.0 + green_ratio = 0.0 + + data.append([ + "Main Traffic Light", + f"{red_ratio:.3f}", + f"{yellow_ratio:.3f}", + f"{green_ratio:.3f}", + status + ]) + + # Also check for individual traffic light detections + for i, tl in enumerate(self.latest_traffic_lights): + bbox = tl.get('bbox', [0, 0, 0, 0]) + # Extract color ratios from debug data if available + color_info = tl.get('color_info', {}) + red_ratio = color_info.get('red', 0.0) + yellow_ratio = color_info.get('yellow', 0.0) + green_ratio = color_info.get('green', 0.0) + + # Determine status + if red_ratio > 0.3: + status = "🔴 Red" + elif yellow_ratio > 0.3: + status = "🟡 Yellow" + elif green_ratio > 0.3: + status = "🟢 Green" + else: + status = "❓ Unknown" + + data.append([ + f"Traffic Light {i+1}", + f"{red_ratio:.3f}", + f"{yellow_ratio:.3f}", + f"{green_ratio:.3f}", + status + ]) + + # If no data, show empty table instead of sample data + if not data: + data = [] + + self.populate_table(self.traffic_table, data, "traffic_light") + + except Exception as e: + print(f"Error updating traffic lights table: {e}") + + def update_violations_table(self): + """Update violations table with latest data""" + try: + data = [] + for violation in self.latest_violations: + data.append([ + str(violation.get('track_id', 'Unknown')), + f"🚨 {violation.get('type', 'Unknown')}", + violation.get('status', 'Active') + ]) + + # If no violations, show empty table + if not data: + data = [] + + self.populate_table(self.violation_table, data, "violation") + + except Exception as e: + print(f"Error updating violations table: {e}") + + def update_vehicles_table(self): + """Update vehicles table with latest data""" + try: + print(f"[ANALYTICS UPDATE] Updating vehicles table with {len(self.latest_vehicles)} vehicles") + data = [] + + for vehicle in self.latest_vehicles: + track_id = vehicle.get('track_id', 'Unknown') + center = vehicle.get('center', (0, 0)) + position = f"({center[0]:.1f}, {center[1]:.1f})" + center_y = center[1] if len(center) > 1 else 0 + moving = vehicle.get('moving', False) + violating = vehicle.get('violating', False) + + if violating: + status = "🔴 Violating" + elif moving: + status = "🟡 Moving" + else: + status = "🟢 Stopped" + + data.append([ + str(track_id), + position, + f"{center_y:.1f}", + str(moving), + str(violating), + status + ]) + + print(f"[ANALYTICS UPDATE] Added vehicle row: ID={track_id}, pos={position}, moving={moving}, violating={violating}, status={status}") + + print(f"[ANALYTICS UPDATE] Total vehicle rows to display: {len(data)}") + + # If no vehicles, show empty table + if not data: + data = [] + + self.populate_table(self.vehicle_table, data, "vehicle") + + except Exception as e: + print(f"Error updating vehicles table: {e}") + import traceback + traceback.print_exc() + + +class AnalyticsTab(QWidget): + """Main analytics tab with clean design""" + + def __init__(self): + super().__init__() + self.init_ui() + + def init_ui(self): + """Initialize the main analytics interface""" + layout = QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + + # Create the clean analytics widget + self.analytics_widget = CleanAnalyticsWidget() + layout.addWidget(self.analytics_widget) + + @Slot(dict) + def update_analytics(self, analytics): + """Update analytics with new data""" + # Forward to the analytics widget + if hasattr(self.analytics_widget, 'update_detection_data'): + self.analytics_widget.update_detection_data(analytics) + + @Slot(dict) + def 
update_detection_data(self, detection_data): + """Update detection data from video tab""" + self.analytics_widget.update_detection_data(detection_data) + + @Slot(dict) + def update_violation_data(self, violation_data): + """Update violation data""" + self.analytics_widget.update_violation_data(violation_data) + + @Slot(dict) + def update_smart_intersection_analytics(self, analytics_data): + """Update smart intersection analytics""" + # Extract relevant data and forward + if 'detections' in analytics_data: + self.analytics_widget.update_detection_data(analytics_data) + if 'violations' in analytics_data: + for violation in analytics_data['violations']: + self.analytics_widget.update_violation_data(violation) diff --git a/qt_app_pyside1/ui/config_panel.py b/qt_app_pyside1/ui/config_panel.py index 8563410..5080dec 100644 --- a/qt_app_pyside1/ui/config_panel.py +++ b/qt_app_pyside1/ui/config_panel.py @@ -2,11 +2,14 @@ from PySide6.QtWidgets import ( QWidget, QVBoxLayout, QHBoxLayout, QLabel, QComboBox, QSlider, QCheckBox, QPushButton, QGroupBox, QFormLayout, QSpinBox, QDoubleSpinBox, QTabWidget, QLineEdit, QFileDialog, - QSpacerItem, QSizePolicy + QSpacerItem, QSizePolicy, QScrollArea ) from PySide6.QtCore import Qt, Signal, Slot from PySide6.QtGui import QFont +# Import VLM insights widget +from ui.vlm_insights_widget import VLMInsightsWidget + class ConfigPanel(QWidget): """Side panel for application configuration.""" @@ -362,10 +365,62 @@ class ConfigPanel(QWidget): violation_layout.addWidget(violation_group) + # === VLM Insights Tab === + vlm_tab = QWidget() + vlm_layout = QVBoxLayout(vlm_tab) + + # Create scroll area for VLM insights + vlm_scroll = QScrollArea() + vlm_scroll.setWidgetResizable(True) + vlm_scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) + vlm_scroll.setStyleSheet(""" + QScrollArea { + border: none; + background: transparent; + } + """) + + # Add VLM insights widget + print("[CONFIG PANEL DEBUG] Creating VLM insights widget...") + self.vlm_insights = VLMInsightsWidget() + print("[CONFIG PANEL DEBUG] VLM insights widget created successfully") + vlm_scroll.setWidget(self.vlm_insights) + vlm_layout.addWidget(vlm_scroll) + + # Smart Intersection Tab - Scene Analytics + smart_intersection_tab = QWidget() + si_layout = QVBoxLayout(smart_intersection_tab) + + # Smart Intersection config widget + si_scroll = QScrollArea() + si_scroll.setWidgetResizable(True) + si_scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) + si_scroll.setStyleSheet(""" + QScrollArea { + border: none; + background: transparent; + } + """) + + try: + from ui.smart_intersection_config import SmartIntersectionConfigPanel + self.smart_intersection_config = SmartIntersectionConfigPanel() + si_scroll.setWidget(self.smart_intersection_config) + print("[CONFIG PANEL DEBUG] Smart Intersection config panel created successfully") + except Exception as e: + print(f"[CONFIG PANEL DEBUG] Error creating Smart Intersection config: {e}") + self.smart_intersection_config = None + si_scroll.setWidget(QLabel(f"Smart Intersection config unavailable: {e}")) + + si_layout.addWidget(si_scroll) + # Add all tabs tabs.addTab(detection_tab, "Detection") tabs.addTab(display_tab, "Display") tabs.addTab(violation_tab, "Violations") + tabs.addTab(vlm_tab, "🤖 AI Insights") # Add VLM insights tab + tabs.addTab(smart_intersection_tab, "🚦 Smart Intersection") # Add Smart Intersection tab + print("[CONFIG PANEL DEBUG] Added AI Insights and Smart Intersection tabs to config panel") layout.addWidget(tabs) diff --git 
a/qt_app_pyside1/ui/main_window.py b/qt_app_pyside1/ui/main_window.py index d05cdd8..3167a16 100644 --- a/qt_app_pyside1/ui/main_window.py +++ b/qt_app_pyside1/ui/main_window.py @@ -1,9 +1,9 @@ from PySide6.QtWidgets import ( QMainWindow, QTabWidget, QDockWidget, QMessageBox, - QApplication, QFileDialog, QSplashScreen, QVBoxLayout, QWidget + QApplication, QFileDialog, QSplashScreen, QVBoxLayout, QWidget, QLabel ) from PySide6.QtCore import Qt, QTimer, QSettings, QSize, Slot -from PySide6.QtGui import QIcon, QPixmap, QAction +from PySide6.QtGui import QIcon, QPixmap, QAction, QFont import os import sys @@ -24,19 +24,26 @@ if hasattr(Qt, 'qInstallMessageHandler'): from ui.analytics_tab import AnalyticsTab from ui.violations_tab import ViolationsTab from ui.export_tab import ExportTab -from ui.config_panel import ConfigPanel -from ui.live_multi_cam_tab import LiveMultiCamTab -from ui.video_detection_tab import VideoDetectionTab +from ui.modern_config_panel import ModernConfigPanel +from ui.modern_live_detection_tab import ModernLiveDetectionTab +# from ui.video_analysis_tab import VideoAnalysisTab +# from ui.video_detection_tab import VideoDetectionTab # Commented out - split into two separate tabs +from ui.video_detection_only_tab import VideoDetectionOnlyTab +from ui.smart_intersection_tab import SmartIntersectionTab from ui.global_status_panel import GlobalStatusPanel +from ui.vlm_insights_widget import VLMInsightsWidget # Import the new VLM Insights Widget +from ui.dashboard_tab import DashboardTab # Import the new Dashboard Tab # Import controllers from controllers.video_controller_new import VideoController from controllers.analytics_controller import AnalyticsController from controllers.performance_overlay import PerformanceOverlay from controllers.model_manager import ModelManager +# VLM Controller removed - functionality moved to insights widget # Import utilities from utils.helpers import load_configuration, save_configuration, save_snapshot +from utils.data_publisher import DataPublisher class MainWindow(QMainWindow): """Main application window.""" @@ -58,6 +65,9 @@ class MainWindow(QMainWindow): # Connect signals and slots self.connectSignals() + # Initialize config panel with current configuration + self.config_panel.set_config(self.config) + # Restore settings self.restoreSettings() @@ -70,49 +80,134 @@ class MainWindow(QMainWindow): def setupUI(self): """Set up the user interface""" # Window properties - self.setWindowTitle("Traffic Monitoring System (OpenVINO PySide6)") + self.setWindowTitle("Traffic Intersection Monitoring System") self.setMinimumSize(1200, 800) self.resize(1400, 900) # Set up central widget with tabs self.tabs = QTabWidget() + # Style the tabs + self.tabs.setStyleSheet(""" + QTabWidget::pane { + border: 1px solid #444; + background-color: #2b2b2b; + } + QTabBar::tab { + background-color: #3c3c3c; + color: white; + padding: 8px 16px; + margin: 2px; + border: 1px solid #555; + border-bottom: none; + border-radius: 4px 4px 0px 0px; + min-width: 120px; + } + QTabBar::tab:selected { + background-color: #0078d4; + border-color: #0078d4; + } + QTabBar::tab:hover { + background-color: #4a4a4a; + } + QTabBar::tab:!selected { + margin-top: 2px; + } + """) + # Create tabs - self.live_tab = LiveMultiCamTab() - self.video_detection_tab = VideoDetectionTab() + self.live_tab = ModernLiveDetectionTab() + # self.video_analysis_tab = VideoAnalysisTab() + # self.video_detection_tab = VideoDetectionTab() # Commented out - split into two separate tabs + 
self.video_detection_only_tab = VideoDetectionOnlyTab() + self.smart_intersection_tab = SmartIntersectionTab() self.analytics_tab = AnalyticsTab() self.violations_tab = ViolationsTab() self.export_tab = ExportTab() + # Remove VLM tab - VLM functionality moved to settings panel + # self.vlm_tab = VLMTab() # Create the VLM tab from ui.performance_graphs import PerformanceGraphsWidget self.performance_tab = PerformanceGraphsWidget() + # Add Dashboard tab + try: + self.dashboard_tab = DashboardTab() + except Exception as e: + print(f"Warning: Could not create Dashboard tab: {e}") + self.dashboard_tab = None + + # Add User Guide tab + try: + from ui.user_guide_tab import UserGuideTab + self.user_guide_tab = UserGuideTab() + except Exception as e: + print(f"Warning: Could not create User Guide tab: {e}") + self.user_guide_tab = None + # Add tabs to tab widget self.tabs.addTab(self.live_tab, "Live Detection") - self.tabs.addTab(self.video_detection_tab, "Video Detection") - self.tabs.addTab(self.performance_tab, "🔥 Performance & Latency") + # self.tabs.addTab(self.video_analysis_tab, "Video Analysis") + # self.tabs.addTab(self.video_detection_tab, "Smart Intersection") # Commented out - split into two tabs + self.tabs.addTab(self.video_detection_only_tab, "Video Detection") + # self.tabs.addTab(self.smart_intersection_tab, "Smart Intersection") # Temporarily hidden + if self.dashboard_tab: + self.tabs.addTab(self.dashboard_tab, "Dashboard") + self.tabs.addTab(self.performance_tab, "Performance & Latency") self.tabs.addTab(self.analytics_tab, "Analytics") self.tabs.addTab(self.violations_tab, "Violations") + # VLM functionality moved to settings panel + # self.tabs.addTab(self.vlm_tab, "🔍 Vision AI") # Add VLM tab with icon self.tabs.addTab(self.export_tab, "Export & Config") + # Add User Guide tab if available + if self.user_guide_tab: + self.tabs.addTab(self.user_guide_tab, "Help") + # Create config panel in dock widget - self.config_panel = ConfigPanel() + self.config_panel = ModernConfigPanel() dock = QDockWidget("Settings", self) dock.setObjectName("SettingsDock") # Set object name to avoid warning dock.setWidget(self.config_panel) dock.setFeatures(QDockWidget.DockWidgetMovable | QDockWidget.DockWidgetClosable) dock.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea) + + # Set minimum and preferred size for the dock widget + dock.setMinimumWidth(400) + dock.resize(450, 800) # Set preferred width and height + self.addDockWidget(Qt.RightDockWidgetArea, dock) # Create status bar self.statusBar().showMessage("Initializing...") + + # Create main layout with header main_layout = QVBoxLayout() + + # Add header title above tabs + header_label = QLabel("Traffic Intersection Monitoring System") + header_label.setAlignment(Qt.AlignCenter) + header_font = QFont() + header_font.setPointSize(14) + header_font.setBold(True) + header_label.setFont(header_font) + header_label.setStyleSheet(""" + QLabel { + color: #ffffff; + background-color: #2b2b2b; + padding: 10px; + border-bottom: 2px solid #0078d4; + margin-bottom: 5px; + } + """) + main_layout.addWidget(header_label) + main_layout.addWidget(self.tabs) central = QWidget() central.setLayout(main_layout) self.setCentralWidget(central) - # Create menu bar - self.setupMenus() + # Create menu bar - commented out for cleaner interface + # self.setupMenus() # Create performance overlay self.performance_overlay = PerformanceOverlay() @@ -131,6 +226,17 @@ class MainWindow(QMainWindow): # Create analytics controller self.analytics_controller = 
AnalyticsController() + + # Initialize data publisher for InfluxDB + print("[MAIN WINDOW DEBUG] Initializing Data Publisher...") + self.data_publisher = DataPublisher(self.config_file) + print("[MAIN WINDOW DEBUG] Data Publisher initialized successfully") + + # VLM controller - using only local VLM folder, no backend + print("[MAIN WINDOW DEBUG] Initializing VLM Controller with local VLM folder...") + from controllers.vlm_controller_new import VLMController + self.vlm_controller = VLMController() # No backend URL needed + print("[MAIN WINDOW DEBUG] VLM Controller initialized successfully") # Setup update timer for performance overlay self.perf_timer = QTimer() @@ -138,11 +244,56 @@ class MainWindow(QMainWindow): self.perf_timer.start(1000) # Update every second # Connect video_file_controller outputs to video_detection_tab - self.video_file_controller.frame_ready.connect(self.video_detection_tab.update_display, Qt.QueuedConnection) - self.video_file_controller.stats_ready.connect(self.video_detection_tab.update_stats, Qt.QueuedConnection) - self.video_file_controller.progress_ready.connect(lambda value, max_value, timestamp: self.video_detection_tab.update_progress(value, max_value, timestamp), Qt.QueuedConnection) + # Connect video file controller signals to both video tabs + self.video_file_controller.frame_ready.connect(self.video_detection_only_tab.update_display, Qt.QueuedConnection) + self.video_file_controller.stats_ready.connect(self.video_detection_only_tab.update_stats, Qt.QueuedConnection) + self.video_file_controller.progress_ready.connect(lambda value, max_value, timestamp: self.video_detection_only_tab.update_progress(value, max_value, timestamp), Qt.QueuedConnection) + + self.video_file_controller.frame_ready.connect(self.smart_intersection_tab.update_display, Qt.QueuedConnection) + self.video_file_controller.stats_ready.connect(self.smart_intersection_tab.update_stats, Qt.QueuedConnection) + self.video_file_controller.progress_ready.connect(lambda value, max_value, timestamp: self.smart_intersection_tab.update_progress(value, max_value, timestamp), Qt.QueuedConnection) + + # Connect video frames to VLM insights for analysis + if hasattr(self.video_file_controller, 'raw_frame_ready'): + print("[MAIN WINDOW DEBUG] Connecting raw_frame_ready signal to VLM insights") + self.video_file_controller.raw_frame_ready.connect( + self._forward_frame_to_vlm, Qt.QueuedConnection + ) + print("[MAIN WINDOW DEBUG] raw_frame_ready signal connected to VLM insights") + + # Also connect to analytics tab + print("[MAIN WINDOW DEBUG] Connecting raw_frame_ready signal to analytics tab") + self.video_file_controller.raw_frame_ready.connect( + self._forward_frame_to_analytics, Qt.QueuedConnection + ) + print("[MAIN WINDOW DEBUG] raw_frame_ready signal connected to analytics tab") + else: + print("[MAIN WINDOW DEBUG] raw_frame_ready signal not found in video_file_controller") # Connect auto model/device selection signal - self.video_detection_tab.auto_select_model_device.connect(self.video_file_controller.auto_select_model_device, Qt.QueuedConnection) + # Connect video tab auto-select signals + self.video_detection_only_tab.auto_select_model_device.connect(self.video_file_controller.auto_select_model_device, Qt.QueuedConnection) + self.smart_intersection_tab.auto_select_model_device.connect(self.video_file_controller.auto_select_model_device, Qt.QueuedConnection) + + # Connect VLM insights analysis requests to a simple mock handler (since optimum is disabled) + print("[MAIN WINDOW DEBUG] Checking for 
VLM insights widget...") + if hasattr(self.config_panel, 'vlm_insights_widget'): + print("[MAIN WINDOW DEBUG] VLM insights widget found, connecting signals...") + self.config_panel.vlm_insights_widget.analyze_frame_requested.connect(self._handle_vlm_analysis, Qt.QueuedConnection) + print("[MAIN WINDOW DEBUG] VLM insights analysis signal connected") + + # Connect pause state signal from video file controller to VLM insights + if hasattr(self.video_file_controller, 'pause_state_changed'): + self.video_file_controller.pause_state_changed.connect(self.config_panel.vlm_insights_widget.on_video_paused, Qt.QueuedConnection) + print("[MAIN WINDOW DEBUG] VLM insights pause state signal connected") + else: + print("[MAIN WINDOW DEBUG] pause_state_changed signal not found in video_file_controller") + else: + print("[MAIN WINDOW DEBUG] VLM insights widget NOT found in config panel") + + # Old VLM tab connections removed - functionality moved to insights widget + # self.vlm_tab.process_image_requested.connect(self.vlm_controller.process_image, Qt.QueuedConnection) + # self.video_controller.frame_np_ready.connect(self.vlm_tab.set_frame, Qt.QueuedConnection) + # self.video_file_controller.frame_np_ready.connect(self.vlm_tab.set_frame, Qt.QueuedConnection) except Exception as e: QMessageBox.critical( self, @@ -150,6 +301,7 @@ class MainWindow(QMainWindow): f"Error initializing controllers: {str(e)}" ) print(f"Error details: {e}") + traceback.print_exc() def connectSignals(self): @@ -212,14 +364,46 @@ class MainWindow(QMainWindow): self.export_tab.reload_config_btn.clicked.connect(self.load_config) self.export_tab.export_btn.clicked.connect(self.export_data) - # Video Detection tab connections - self.video_detection_tab.file_selected.connect(self._handle_video_file_selected) - self.video_detection_tab.play_clicked.connect(self._handle_video_play) - self.video_detection_tab.pause_clicked.connect(self._handle_video_pause) - self.video_detection_tab.stop_clicked.connect(self._handle_video_stop) - self.video_detection_tab.detection_toggled.connect(self._handle_video_detection_toggle) - self.video_detection_tab.screenshot_clicked.connect(self._handle_video_screenshot) - self.video_detection_tab.seek_changed.connect(self._handle_video_seek) + # Video Detection tab connections (standard tab) + self.video_detection_only_tab.file_selected.connect(self._handle_video_file_selected) + self.video_detection_only_tab.play_clicked.connect(self._handle_video_play) + self.video_detection_only_tab.pause_clicked.connect(self._handle_video_pause) + self.video_detection_only_tab.stop_clicked.connect(self._handle_video_stop) + self.video_detection_only_tab.detection_toggled.connect(self._handle_video_detection_toggle) + self.video_detection_only_tab.screenshot_clicked.connect(self._handle_video_screenshot) + self.video_detection_only_tab.seek_changed.connect(self._handle_video_seek) + + # Smart Intersection tab connections + self.smart_intersection_tab.file_selected.connect(self._handle_video_file_selected) + self.smart_intersection_tab.play_clicked.connect(self._handle_video_play) + self.smart_intersection_tab.pause_clicked.connect(self._handle_video_pause) + self.smart_intersection_tab.stop_clicked.connect(self._handle_video_stop) + self.smart_intersection_tab.detection_toggled.connect(self._handle_video_detection_toggle) + self.smart_intersection_tab.screenshot_clicked.connect(self._handle_video_screenshot) + self.smart_intersection_tab.seek_changed.connect(self._handle_video_seek) + + # Smart Intersection specific 
connections + self.smart_intersection_tab.smart_intersection_enabled.connect(self._handle_smart_intersection_enabled) + self.smart_intersection_tab.multi_camera_mode_enabled.connect(self._handle_multi_camera_mode) + self.smart_intersection_tab.roi_configuration_changed.connect(self._handle_roi_configuration_changed) + self.smart_intersection_tab.scene_analytics_toggled.connect(self._handle_scene_analytics_toggle) + + # Connect smart intersection controller if available + try: + from controllers.smart_intersection_controller import SmartIntersectionController + self.smart_intersection_controller = SmartIntersectionController() + + # Connect scene analytics signals + self.video_file_controller.frame_np_ready.connect( + self.smart_intersection_controller.process_frame, Qt.QueuedConnection + ) + self.smart_intersection_controller.scene_analytics_ready.connect( + self._handle_scene_analytics_update, Qt.QueuedConnection + ) + print("✅ Smart Intersection Controller connected") + except Exception as e: + print(f"⚠️ Smart Intersection Controller not available: {e}") + self.smart_intersection_controller = None # Connect OpenVINO device info signal to config panel from BOTH controllers self.video_controller.device_info_ready.connect(self.config_panel.update_devices_info, Qt.QueuedConnection) @@ -227,7 +411,57 @@ class MainWindow(QMainWindow): # After connecting video_file_controller and video_detection_tab, trigger auto model/device update QTimer.singleShot(0, self.video_file_controller.auto_select_model_device.emit) + + # Connect performance statistics from both controllers self.video_controller.performance_stats_ready.connect(self.update_performance_graphs) + self.video_file_controller.performance_stats_ready.connect(self.update_performance_graphs) + + # Connect enhanced performance tab signals + if hasattr(self, 'performance_tab'): + try: + # Connect performance tab signals for better integration + self.performance_tab.spike_detected.connect(self.handle_performance_spike) + self.performance_tab.device_switched.connect(self.handle_device_switch_notification) + self.performance_tab.performance_data_updated.connect(self.handle_performance_data_update) + print("✅ Performance tab signals connected successfully") + except Exception as e: + print(f"⚠️ Could not connect performance tab signals: {e}") + + @Slot(dict) + def handle_performance_spike(self, spike_data): + """Handle performance spike detection""" + try: + latency = spike_data.get('latency', 0) + device = spike_data.get('device', 'Unknown') + print(f"🚨 Performance spike detected: {latency:.1f}ms on {device}") + + # Optionally show notification or log to analytics + if hasattr(self, 'analytics_tab'): + # Could add spike to analytics if needed + pass + + except Exception as e: + print(f"❌ Error handling performance spike: {e}") + + @Slot(str) + def handle_device_switch_notification(self, device): + """Handle device switch notification""" + try: + print(f"🔄 Device switched to: {device}") + # Could update UI elements or show notification + except Exception as e: + print(f"❌ Error handling device switch notification: {e}") + + @Slot(dict) + def handle_performance_data_update(self, performance_data): + """Handle performance data updates for other components""" + try: + # Could forward to other tabs or components that need performance data + if hasattr(self, 'analytics_tab'): + # Forward performance data to analytics if needed + pass + except Exception as e: + print(f"❌ Error handling performance data update: {e}") def setupMenus(self): """Set up 
application menus""" # File menu @@ -284,16 +518,46 @@ class MainWindow(QMainWindow): if not config: return - # Update config - for section in config: - if section in self.config: - self.config[section].update(config[section]) - else: - self.config[section] = config[section] + # Convert flat config to nested structure for model manager + nested_config = { + "detection": {} + } - # Update model manager + # Map config panel values to model manager format + if 'device' in config: + nested_config["detection"]["device"] = config['device'] + if 'model' in config: + # Convert YOLOv11x format to yolo11x format for model manager + model_name = config['model'].lower() + if 'yolov11' in model_name: + model_name = model_name.replace('yolov11', 'yolo11') + elif model_name == 'auto': + model_name = 'auto' + nested_config["detection"]["model"] = model_name + if 'confidence_threshold' in config: + nested_config["detection"]["confidence_threshold"] = config['confidence_threshold'] + if 'iou_threshold' in config: + nested_config["detection"]["iou_threshold"] = config['iou_threshold'] + + print(f"🔧 Main Window: Applying config to model manager: {nested_config}") + print(f"🔧 Main Window: Received config from panel: {config}") + + # Update config + for section in nested_config: + if section in self.config: + self.config[section].update(nested_config[section]) + else: + self.config[section] = nested_config[section] + + # Update model manager with nested config if self.model_manager: - self.model_manager.update_config(self.config) + self.model_manager.update_config(nested_config) + + # Refresh model information in video controllers + if hasattr(self, 'video_controller') and self.video_controller: + self.video_controller.refresh_model_info() + if hasattr(self, 'video_file_controller') and self.video_file_controller: + self.video_file_controller.refresh_model_info() # Save config to file save_configuration(self.config, self.config_file) @@ -302,7 +566,9 @@ class MainWindow(QMainWindow): self.export_tab.update_config_display(self.config) # Update status - self.statusBar().showMessage("Configuration applied", 2000) + device = config.get('device', 'Unknown') + model = config.get('model', 'Unknown') + self.statusBar().showMessage(f"Configuration applied - Device: {device}, Model: {model}", 3000) @Slot() def load_config(self): @@ -642,6 +908,7 @@ class MainWindow(QMainWindow): confidence_str = f" (Confidence: {confidence:.2f})" if confidence > 0 else "" else: traffic_light_color = traffic_light_info + confidence = 1.0 confidence_str = "" if traffic_light_color != 'unknown': @@ -653,6 +920,16 @@ class MainWindow(QMainWindow): else: color_text = str(traffic_light_color).upper() self.statusBar().showMessage(f"Traffic Light: {color_text}{confidence_str}") + + # Publish traffic light status to InfluxDB + if hasattr(self, 'data_publisher') and self.data_publisher: + try: + color_for_publishing = traffic_light_color + if isinstance(traffic_light_color, dict): + color_for_publishing = traffic_light_color.get("color", "unknown") + self.data_publisher.publish_traffic_light_status(color_for_publishing, confidence) + except Exception as e: + print(f"❌ Error publishing traffic light status: {e}") @Slot(dict) def handle_violation_detected(self, violation): """Handle a detected traffic violation""" @@ -663,9 +940,28 @@ class MainWindow(QMainWindow): # Add to violations tab self.violations_tab.add_violation(violation) + # Update analytics tab with violation data + if hasattr(self.analytics_tab, 'update_violation_data'): + 
self.analytics_tab.update_violation_data(violation) + print(f"[ANALYTICS DEBUG] Violation data forwarded to analytics tab") + # Update analytics if self.analytics_controller: self.analytics_controller.register_violation(violation) + + # Publish violation to InfluxDB + if hasattr(self, 'data_publisher') and self.data_publisher: + try: + violation_type = violation.get('type', 'red_light_violation') + vehicle_id = violation.get('track_id', 'unknown') + details = { + 'timestamp': violation.get('timestamp', ''), + 'confidence': violation.get('confidence', 1.0), + 'location': violation.get('location', 'crosswalk') + } + self.data_publisher.publish_violation_event(violation_type, vehicle_id, details) + except Exception as e: + print(f"❌ Error publishing violation event: {e}") print(f"🚨 Violation processed: {violation['id']} at {violation['timestamp']}") except Exception as e: @@ -678,10 +974,29 @@ class MainWindow(QMainWindow): self.video_file_controller.set_source(file_path) def _handle_video_play(self): print("[VideoDetection] Play clicked") - self.video_file_controller.play() + # Check if video is paused, if so resume, otherwise start + if hasattr(self.video_file_controller, '_paused') and self.video_file_controller._paused: + self.video_file_controller.resume() + else: + self.video_file_controller.play() + # Notify VLM insights that video is playing (not paused) + print("[MAIN WINDOW DEBUG] Notifying VLM insights: video playing") + if hasattr(self, 'config_panel') and hasattr(self.config_panel, 'vlm_insights_widget'): + self.config_panel.vlm_insights_widget.on_video_paused(False) + print("[MAIN WINDOW DEBUG] VLM insights notified: not paused") + else: + print("[MAIN WINDOW DEBUG] VLM insights not found for play notification") + def _handle_video_pause(self): print("[VideoDetection] Pause clicked") self.video_file_controller.pause() + # Notify VLM insights that video is paused + print("[MAIN WINDOW DEBUG] Notifying VLM insights: video paused") + if hasattr(self, 'config_panel') and hasattr(self.config_panel, 'vlm_insights_widget'): + self.config_panel.vlm_insights_widget.on_video_paused(True) + print("[MAIN WINDOW DEBUG] VLM insights notified: paused") + else: + print("[MAIN WINDOW DEBUG] VLM insights not found for pause notification") def _handle_video_stop(self): print("[VideoDetection] Stop clicked") self.video_file_controller.stop() @@ -727,24 +1042,368 @@ class MainWindow(QMainWindow): self.statusBar().showMessage(f"Error switching device: {e}", 3000) @Slot(dict) def update_performance_graphs(self, stats): - """Update the performance graphs using the new robust widget logic.""" + """Update the performance graphs using the enhanced widget logic.""" if not hasattr(self, 'performance_tab'): return print(f"[PERF DEBUG] update_performance_graphs called with: {stats}") + + # Publish performance data to InfluxDB + if hasattr(self, 'data_publisher') and self.data_publisher: + try: + fps = stats.get('fps', 0) + inference_time = stats.get('inference_time', 0) + cpu_usage = stats.get('cpu_usage', None) + gpu_usage = stats.get('gpu_usage', None) + + self.data_publisher.publish_performance_data(fps, inference_time, cpu_usage, gpu_usage) + + # Publish device info periodically (every 10th frame) + if hasattr(self, '_device_info_counter'): + self._device_info_counter += 1 + else: + self._device_info_counter = 1 + + if self._device_info_counter % 10 == 0: + self.data_publisher.publish_device_info() + except Exception as e: + print(f"❌ Error publishing performance data: {e}") + + # Enhanced analytics data 
with proper structure + current_time = time.time() analytics_data = { 'real_time_data': { - 'timestamps': [stats.get('frame_idx', 0)], + 'timestamps': [current_time], 'inference_latency': [stats.get('inference_time', 0)], 'fps': [stats.get('fps', 0)], 'device_usage': [1 if stats.get('device', 'CPU') == 'GPU' else 0], 'resolution_width': [int(stats.get('resolution', '640x360').split('x')[0]) if 'x' in stats.get('resolution', '') else 640], 'resolution_height': [int(stats.get('resolution', '640x360').split('x')[1]) if 'x' in stats.get('resolution', '') else 360], - 'device_switches': [0] if stats.get('is_device_switch', False) else [], - 'resolution_changes': [0] if stats.get('is_res_change', False) else [], }, - 'latency_statistics': {}, - 'current_metrics': {}, - 'system_metrics': {}, + 'latency_statistics': { + 'avg': stats.get('avg_inference_time', 0), + 'max': stats.get('max_inference_time', 0), + 'min': stats.get('min_inference_time', 0), + 'spike_count': stats.get('spike_count', 0) + }, + 'current_metrics': { + 'device': stats.get('device', 'CPU'), + 'resolution': stats.get('resolution', 'Unknown'), + 'model': stats.get('model_name', stats.get('model', 'Unknown')), # Try model_name first, then model + 'fps': stats.get('fps', 0), + 'inference_time': stats.get('inference_time', 0) + }, + 'system_metrics': { + 'cpu_usage': stats.get('cpu_usage', 0), + 'gpu_usage': stats.get('gpu_usage', 0), + 'memory_usage': stats.get('memory_usage', 0) + } } - print(f"[PERF DEBUG] analytics_data for update_performance_data: {analytics_data}") + + print(f"[PERF DEBUG] Enhanced analytics_data: {analytics_data}") + + # Update performance graphs with enhanced data self.performance_tab.update_performance_data(analytics_data) + + def _handle_vlm_analysis(self, frame, prompt): + """Handle VLM analysis requests.""" + print(f"[MAIN WINDOW DEBUG] _handle_vlm_analysis called") + print(f"[MAIN WINDOW DEBUG] Frame type: {type(frame)}, shape: {frame.shape if hasattr(frame, 'shape') else 'N/A'}") + print(f"[MAIN WINDOW DEBUG] Prompt: '{prompt}'") + + try: + # Check if VLM controller is available + if hasattr(self, 'vlm_controller') and self.vlm_controller: + print(f"[MAIN WINDOW DEBUG] Using VLM controller for analysis") + + # Connect VLM result to insights widget if not already connected + if not hasattr(self, '_vlm_connected'): + print(f"[MAIN WINDOW DEBUG] Connecting VLM controller results to insights widget") + self.vlm_controller.result_ready.connect( + lambda result: self._handle_vlm_result(result), + Qt.QueuedConnection + ) + self._vlm_connected = True + + # Process image with VLM controller + self.vlm_controller.process_image(frame, prompt) + print(f"[MAIN WINDOW DEBUG] VLM controller processing started") + + else: + print(f"[MAIN WINDOW DEBUG] VLM controller not available, using mock analysis") + # Fallback to mock analysis + import cv2 + import numpy as np + result = self._generate_mock_analysis(frame, prompt) + print(f"[MAIN WINDOW DEBUG] Mock analysis generated: {len(result)} characters") + + # Send result back to VLM insights widget + if hasattr(self.config_panel, 'vlm_insights_widget'): + print(f"[MAIN WINDOW DEBUG] Sending mock result to VLM insights widget") + self.config_panel.vlm_insights_widget.on_analysis_result(result) + print(f"[MAIN WINDOW DEBUG] Mock result sent successfully") + else: + print(f"[MAIN WINDOW DEBUG] VLM insights widget not found") + + except Exception as e: + print(f"[VLM ERROR] Error in analysis: {e}") + if hasattr(self.config_panel, 'vlm_insights_widget'): + 
self.config_panel.vlm_insights_widget.on_analysis_result(f"Analysis error: {str(e)}") + + def _handle_vlm_result(self, result): + """Handle VLM controller results.""" + print(f"[MAIN WINDOW DEBUG] _handle_vlm_result called") + print(f"[MAIN WINDOW DEBUG] Result type: {type(result)}") + + try: + # Extract answer from result dict + if isinstance(result, dict): + if 'response' in result: + answer = result['response'] + print(f"[MAIN WINDOW DEBUG] Extracted response: {len(str(answer))} characters") + elif 'answer' in result: + answer = result['answer'] + print(f"[MAIN WINDOW DEBUG] Extracted answer: {len(str(answer))} characters") + else: + answer = str(result) + print(f"[MAIN WINDOW DEBUG] Using result as string: {len(answer)} characters") + else: + answer = str(result) + print(f"[MAIN WINDOW DEBUG] Using result as string: {len(answer)} characters") + + # Send result to VLM insights widget + if hasattr(self.config_panel, 'vlm_insights_widget'): + print(f"[MAIN WINDOW DEBUG] Sending VLM result to insights widget") + self.config_panel.vlm_insights_widget.on_analysis_result(answer) + print(f"[MAIN WINDOW DEBUG] VLM result sent successfully") + else: + print(f"[MAIN WINDOW DEBUG] VLM insights widget not found") + + except Exception as e: + print(f"[VLM ERROR] Error handling VLM result: {e}") + + def _forward_frame_to_vlm(self, frame, detections, fps): + """Forward frame to VLM insights widget.""" + print(f"[MAIN WINDOW DEBUG] _forward_frame_to_vlm called") + print(f"[MAIN WINDOW DEBUG] Frame type: {type(frame)}, shape: {frame.shape if hasattr(frame, 'shape') else 'N/A'}") + print(f"[MAIN WINDOW DEBUG] Detections count: {len(detections) if detections else 0}") + print(f"[MAIN WINDOW DEBUG] FPS: {fps}") + + # Publish detection events to InfluxDB + if hasattr(self, 'data_publisher') and self.data_publisher and detections: + try: + # Count vehicles and pedestrians + vehicle_count = 0 + pedestrian_count = 0 + + for detection in detections: + label = "" + if isinstance(detection, dict): + label = detection.get('label', '').lower() + elif hasattr(detection, 'label'): + label = getattr(detection, 'label', '').lower() + elif hasattr(detection, 'class_name'): + label = getattr(detection, 'class_name', '').lower() + elif hasattr(detection, 'cls'): + label = str(getattr(detection, 'cls', '')).lower() + + # Debug the label detection + if label and label != 'traffic light': + print(f"[PUBLISHER DEBUG] Detected object: {label}") + + if label in ['car', 'truck', 'bus', 'motorcycle', 'vehicle']: + vehicle_count += 1 + elif label in ['person', 'pedestrian']: + pedestrian_count += 1 + + # Also try to get vehicle count from tracked vehicles if available + if vehicle_count == 0 and hasattr(self, 'video_file_controller'): + try: + # Try to get vehicle count from current analysis data + analysis_data = getattr(self.video_file_controller, 'get_current_analysis_data', lambda: {})() + if isinstance(analysis_data, dict): + tracked_vehicles = analysis_data.get('tracked_vehicles', []) + if tracked_vehicles: + vehicle_count = len(tracked_vehicles) + print(f"[PUBLISHER DEBUG] Using tracked vehicle count: {vehicle_count}") + except: + pass + + self.data_publisher.publish_detection_events(vehicle_count, pedestrian_count) + except Exception as e: + print(f"❌ Error publishing detection events: {e}") + + try: + if hasattr(self.config_panel, 'vlm_insights_widget'): + print(f"[MAIN WINDOW DEBUG] Forwarding frame to VLM insights widget") + self.config_panel.vlm_insights_widget.set_current_frame(frame) + + # Store detection data for 
VLM analysis + if hasattr(self.config_panel.vlm_insights_widget, 'set_detection_data'): + print(f"[MAIN WINDOW DEBUG] Setting detection data for VLM") + detection_data = { + 'detections': detections, + 'fps': fps, + 'timestamp': time.time() + } + # Get additional data from video controller if available + if hasattr(self.video_file_controller, 'get_current_analysis_data'): + analysis_data = self.video_file_controller.get_current_analysis_data() + detection_data.update(analysis_data) + + self.config_panel.vlm_insights_widget.set_detection_data(detection_data) + print(f"[MAIN WINDOW DEBUG] Detection data set successfully") + + print(f"[MAIN WINDOW DEBUG] Frame forwarded successfully") + else: + print(f"[MAIN WINDOW DEBUG] VLM insights widget not found for frame forwarding") + except Exception as e: + print(f"[MAIN WINDOW DEBUG] Error forwarding frame to VLM: {e}") + + def _forward_frame_to_analytics(self, frame, detections, fps): + """Forward frame data to analytics tab for real-time updates.""" + try: + print(f"[ANALYTICS DEBUG] Forwarding frame data to analytics tab") + print(f"[ANALYTICS DEBUG] Detections count: {len(detections) if detections else 0}") + + # Prepare detection data for analytics + detection_data = { + 'detections': detections, + 'fps': fps, + 'timestamp': time.time(), + 'frame_shape': frame.shape if hasattr(frame, 'shape') else None + } + + # Get additional analysis data from video controller + if hasattr(self.video_file_controller, 'get_current_analysis_data'): + analysis_data = self.video_file_controller.get_current_analysis_data() + if analysis_data: + detection_data.update(analysis_data) + print(f"[ANALYTICS DEBUG] Updated with analysis data: {list(analysis_data.keys())}") + + # Forward to analytics tab + if hasattr(self.analytics_tab, 'update_detection_data'): + self.analytics_tab.update_detection_data(detection_data) + print(f"[ANALYTICS DEBUG] Detection data forwarded to analytics tab successfully") + else: + print(f"[ANALYTICS DEBUG] Analytics tab update_detection_data method not found") + + except Exception as e: + print(f"[ANALYTICS DEBUG] Error forwarding frame to analytics: {e}") + import traceback + traceback.print_exc() + + def _generate_mock_analysis(self, frame, prompt): + """Generate a mock analysis response based on frame content and prompt.""" + try: + import cv2 + import numpy as np + + # Analyze frame properties + h, w = frame.shape[:2] if frame is not None else (0, 0) + + # Basic image analysis + analysis_parts = [] + + if "traffic" in prompt.lower(): + analysis_parts.append("🚦 Traffic Analysis:") + analysis_parts.append(f"• Frame resolution: {w}x{h}") + analysis_parts.append("• Detected scene: Urban traffic intersection") + analysis_parts.append("• Visible elements: Road, potential vehicles") + analysis_parts.append("• Traffic flow appears to be moderate") + + elif "safety" in prompt.lower(): + analysis_parts.append("⚠️ Safety Assessment:") + analysis_parts.append("• Monitoring for traffic violations") + analysis_parts.append("• Checking lane discipline") + analysis_parts.append("• Observing traffic light compliance") + analysis_parts.append("• Overall safety level: Monitoring required") + + else: + analysis_parts.append("🔍 General Analysis:") + analysis_parts.append(f"• Image dimensions: {w}x{h} pixels") + analysis_parts.append("• Scene type: Traffic monitoring view") + analysis_parts.append("• Quality: Processing frame for analysis") + analysis_parts.append(f"• Prompt: {prompt[:100]}...") + + # Add timestamp and disclaimer + from datetime import 
datetime + timestamp = datetime.now().strftime("%H:%M:%S") + analysis_parts.append(f"\n📝 Analysis completed at {timestamp}") + analysis_parts.append("ℹ️ Note: This is a mock analysis. Full AI analysis requires compatible OpenVINO setup.") + + return "\n".join(analysis_parts) + + except Exception as e: + return f"Unable to analyze frame: {str(e)}" + + # Smart Intersection Signal Handlers + @Slot(bool) + def _handle_smart_intersection_enabled(self, enabled): + """Handle smart intersection mode toggle""" + print(f"🚦 Smart Intersection mode {'enabled' if enabled else 'disabled'}") + + if self.smart_intersection_controller: + self.smart_intersection_controller.set_enabled(enabled) + + # Update status + if enabled: + self.statusBar().showMessage("Smart Intersection mode activated") + else: + self.statusBar().showMessage("Standard detection mode") + + @Slot(bool) + def _handle_multi_camera_mode(self, enabled): + """Handle multi-camera mode toggle""" + print(f"📹 Multi-camera mode {'enabled' if enabled else 'disabled'}") + + if self.smart_intersection_controller: + self.smart_intersection_controller.set_multi_camera_mode(enabled) + + @Slot(dict) + def _handle_roi_configuration_changed(self, roi_config): + """Handle ROI configuration changes""" + print(f"🎯 ROI configuration updated: {len(roi_config.get('rois', []))} regions") + + if self.smart_intersection_controller: + self.smart_intersection_controller.update_roi_config(roi_config) + + @Slot(bool) + def _handle_scene_analytics_toggle(self, enabled): + """Handle scene analytics toggle""" + print(f"📊 Scene analytics {'enabled' if enabled else 'disabled'}") + + if self.smart_intersection_controller: + self.smart_intersection_controller.set_scene_analytics(enabled) + + @Slot(dict) + def _handle_scene_analytics_update(self, analytics_data): + """Handle scene analytics data updates""" + try: + # Update video detection tab with smart intersection data + smart_stats = { + 'total_objects': analytics_data.get('total_objects', 0), + 'active_tracks': analytics_data.get('active_tracks', 0), + 'roi_events': analytics_data.get('roi_events', 0), + 'crosswalk_events': analytics_data.get('crosswalk_events', 0), + 'lane_events': analytics_data.get('lane_events', 0), + 'safety_events': analytics_data.get('safety_events', 0), + 'north_objects': analytics_data.get('camera_stats', {}).get('north', 0), + 'east_objects': analytics_data.get('camera_stats', {}).get('east', 0), + 'south_objects': analytics_data.get('camera_stats', {}).get('south', 0), + 'west_objects': analytics_data.get('camera_stats', {}).get('west', 0), + 'fps': analytics_data.get('fps', 0), + 'processing_time': analytics_data.get('processing_time_ms', 0), + 'gpu_usage': analytics_data.get('gpu_usage', 0), + 'memory_usage': analytics_data.get('memory_usage', 0) + } + + # Update both video tabs with stats + self.video_detection_only_tab.update_stats(smart_stats) + self.smart_intersection_tab.update_stats(smart_stats) + + # Update analytics tab if it has smart intersection support + if hasattr(self.analytics_tab, 'update_smart_intersection_analytics'): + self.analytics_tab.update_smart_intersection_analytics(analytics_data) + + except Exception as e: + print(f"Error handling scene analytics update: {e}") diff --git a/qt_app_pyside1/ui/performance_graphs.py b/qt_app_pyside1/ui/performance_graphs.py index 6d18167..08369c3 100644 --- a/qt_app_pyside1/ui/performance_graphs.py +++ b/qt_app_pyside1/ui/performance_graphs.py @@ -5,16 +5,25 @@ Shows when latency spikes occur with different resolutions and devices 
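# A minimal usage sketch for the RealTimeGraph widget defined below, assuming the
# 100 ms default spike threshold used by PerformanceGraphsWidget; the latency value
# and variable names are illustrative only, shown here as comments:
#
#   import time
#   from PySide6.QtWidgets import QApplication
#
#   app = QApplication([])
#   latency_graph = RealTimeGraph("Inference Latency", y_label="ms")
#   latency_ms, device = 142.0, "GPU"          # one hypothetical frame measurement
#   latency_graph.add_data_point(
#       time.time(), latency_ms,
#       is_spike=latency_ms > 100.0,           # flag latencies above the spike threshold
#       device=device,
#   )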
from PySide6.QtWidgets import ( QWidget, QVBoxLayout, QHBoxLayout, QLabel, - QGroupBox, QTabWidget, QFrame, QSplitter + QGroupBox, QTabWidget, QFrame, QSplitter, QScrollArea ) from PySide6.QtCore import Qt, QTimer, Signal, Slot -from PySide6.QtGui import QPainter, QPen, QBrush, QColor, QFont +from PySide6.QtGui import QPainter, QPen, QBrush, QColor, QFont, QLinearGradient import numpy as np from collections import deque from typing import Dict, List, Any +import time + +# Try to import psutil for system monitoring, use fallback if not available +try: + import psutil + PSUTIL_AVAILABLE = True +except ImportError: + PSUTIL_AVAILABLE = False + print("⚠️ psutil not available - system monitoring will use fallback values") class RealTimeGraph(QWidget): - """Custom widget for drawing real-time graphs""" + """Custom widget for drawing real-time graphs with enhanced styling""" def __init__(self, title: str = "Graph", y_label: str = "Value", max_points: int = 300): super().__init__() @@ -29,20 +38,75 @@ class RealTimeGraph(QWidget): self.device_markers = deque(maxlen=max_points) # Mark device changes self.resolution_markers = deque(maxlen=max_points) # Mark resolution changes - # Graph settings - self.margin = 40 - self.grid_color = QColor(60, 60, 60) - self.line_color = QColor(0, 255, 255) # Cyan - self.spike_color = QColor(255, 0, 0) # Red for spikes - self.cpu_color = QColor(100, 150, 255) # Blue for CPU - self.gpu_color = QColor(255, 150, 100) # Orange for GPU + # Enhanced styling colors + self.bg_color = QColor(18, 18, 18) # Very dark background + self.grid_color = QColor(40, 40, 45) # Subtle grid + self.line_color = QColor(0, 230, 255) # Bright cyan + self.spike_color = QColor(255, 77, 77) # Bright red for spikes + self.cpu_color = QColor(120, 180, 255) # Light blue for CPU + self.gpu_color = QColor(255, 165, 0) # Orange for GPU + self.text_color = QColor(220, 220, 220) # Light gray text + self.accent_color = QColor(255, 215, 0) # Gold accent # Auto-scaling self.y_min = 0 self.y_max = 100 self.auto_scale = True + # Performance counters + self.spike_count = 0 + self.device_switches = 0 + self.resolution_changes = 0 + self.setMinimumSize(400, 200) + self.setStyleSheet(""" + QWidget { + background-color: #121212; + border: 1px solid #2a2a2a; + border-radius: 8px; + } + """) + + def add_data_point(self, x: float, y: float, is_spike: bool = False, device: str = "CPU", is_res_change: bool = False): + """Add a new data point to the graph""" + self.x_data.append(x) + self.y_data.append(y) + self.spike_markers.append(is_spike) + self.device_markers.append(device) + self.resolution_markers.append(is_res_change) + + # Update counters + if is_spike: + self.spike_count += 1 + if len(self.device_markers) > 1 and device != list(self.device_markers)[-2]: + self.device_switches += 1 + if is_res_change: + self.resolution_changes += 1 + + # Auto-scale Y axis with better algorithm + if self.auto_scale and self.y_data: + data_max = max(self.y_data) + data_min = min(self.y_data) + if data_max > data_min: + padding = (data_max - data_min) * 0.15 + self.y_max = data_max + padding + self.y_min = max(0, data_min - padding * 0.5) + else: + self.y_max = data_max + 10 if data_max > 0 else 100 + self.y_min = 0 + self.update() + + def clear_data(self): + """Clear the graph data""" + self.x_data.clear() + self.y_data.clear() + self.spike_markers.clear() + self.device_markers.clear() + self.resolution_markers.clear() + self.spike_count = 0 + self.device_switches = 0 + self.resolution_changes = 0 + self.update() def 
add_data_point(self, x: float, y: float, is_spike: bool = False, device: str = "CPU", is_res_change: bool = False): """Add a new data point to the graph""" @@ -71,133 +135,479 @@ class RealTimeGraph(QWidget): self.update() def paintEvent(self, event): - """Override paint event to draw the graph""" + """Override paint event to draw the graph with enhanced styling""" painter = QPainter(self) painter.setRenderHint(QPainter.Antialiasing) width = self.width() height = self.height() - graph_width = width - 2 * self.margin - graph_height = height - 2 * self.margin + margin = 50 + graph_width = width - 2 * margin + graph_height = height - 2 * margin - # Background - painter.fillRect(self.rect(), QColor(30, 30, 30)) + # Enhanced background with subtle gradient + gradient = QLinearGradient(0, 0, 0, height) + gradient.setColorAt(0, QColor(25, 25, 30)) + gradient.setColorAt(1, QColor(15, 15, 20)) + painter.fillRect(self.rect(), QBrush(gradient)) - # Title - painter.setPen(QColor(255, 255, 255)) - painter.setFont(QFont("Arial", 12, QFont.Bold)) - painter.drawText(10, 20, self.title) + # Title with glow effect + painter.setPen(self.accent_color) + painter.setFont(QFont("Segoe UI", 13, QFont.Bold)) + title_rect = painter.fontMetrics().boundingRect(self.title) + painter.drawText(15, 25, self.title) - # Axes - painter.setPen(QPen(QColor(200, 200, 200), 2)) - painter.drawLine(self.margin, self.margin, self.margin, height - self.margin) - painter.drawLine(self.margin, height - self.margin, width - self.margin, height - self.margin) + # Enhanced axes with better styling + painter.setPen(QPen(self.text_color, 2)) + painter.drawLine(margin, margin, margin, height - margin) # Y-axis + painter.drawLine(margin, height - margin, width - margin, height - margin) # X-axis - # Grid - painter.setPen(QPen(self.grid_color, 1)) - for i in range(5): - y = self.margin + (graph_height * i / 4) - painter.drawLine(self.margin, y, width - self.margin, y) - for i in range(10): - x = self.margin + (graph_width * i / 9) - painter.drawLine(x, self.margin, x, height - self.margin) + # Enhanced grid with subtle styling + painter.setPen(QPen(self.grid_color, 1, Qt.DotLine)) + # Horizontal grid lines + for i in range(1, 5): + y = margin + (graph_height * i / 4) + painter.drawLine(margin + 5, y, width - margin - 5, y) + # Vertical grid lines + for i in range(1, 10): + x = margin + (graph_width * i / 9) + painter.drawLine(x, margin + 5, x, height - margin - 5) - # Y-axis labels - painter.setPen(QColor(200, 200, 200)) - painter.setFont(QFont("Arial", 8)) + # Enhanced Y-axis labels with better formatting + painter.setPen(self.text_color) + painter.setFont(QFont("Segoe UI", 9)) for i in range(5): y_val = self.y_min + (self.y_max - self.y_min) * (4 - i) / 4 - y_pos = self.margin + (graph_height * i / 4) - painter.drawText(5, y_pos + 5, f"{y_val:.1f}") + y_pos = margin + (graph_height * i / 4) + if y_val >= 1000: + label = f"{y_val/1000:.1f}k" + elif y_val >= 1: + label = f"{y_val:.1f}" + else: + label = f"{y_val:.2f}" + painter.drawText(5, y_pos + 4, label) - # X-axis label + # Enhanced Y-axis label with rotation painter.save() - painter.translate(15, height // 2) + painter.setPen(self.text_color) + painter.setFont(QFont("Segoe UI", 10)) + painter.translate(20, height // 2) painter.rotate(-90) - painter.drawText(-len(self.y_label) * 3, 0, self.y_label) + painter.drawText(-len(self.y_label) * 4, 0, self.y_label) painter.restore() - # Data points + # Enhanced data visualization if len(self.x_data) >= 2 and len(self.y_data) >= 2: points = [] 
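                # Mapping sketch for the loop below (illustrative numbers): a point whose
                # x_val sits at the midpoint of [x_min, x_max] and whose y_val equals y_max
                # lands at
                #   x_screen = margin + 0.5 * graph_width
                #   y_screen = height - margin - graph_height      (top of the plot area)
                # i.e. each value is normalised to [0, 1] over the current data range and
                # scaled into the region between the margins.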
spike_points = [] device_changes = [] res_changes = [] + x_min = min(self.x_data) if self.x_data else 0 x_max = max(self.x_data) if self.x_data else 1 x_range = x_max - x_min if x_max > x_min else 1 + + # Prepare point coordinates for i, (x_val, y_val, is_spike, device, is_res_change) in enumerate(zip( self.x_data, self.y_data, self.spike_markers, self.device_markers, self.resolution_markers )): - x_screen = self.margin + (x_val - x_min) / x_range * graph_width - y_screen = height - self.margin - (y_val - self.y_min) / (self.y_max - self.y_min) * graph_height + x_screen = margin + (x_val - x_min) / x_range * graph_width + y_screen = height - margin - (y_val - self.y_min) / (self.y_max - self.y_min) * graph_height points.append((x_screen, y_screen)) + if is_spike: spike_points.append((x_screen, y_screen)) if i > 0 and device != list(self.device_markers)[i-1]: device_changes.append((x_screen, y_screen, device)) if is_res_change: res_changes.append((x_screen, y_screen)) + + # Draw main line with enhanced styling if len(points) >= 2: - painter.setPen(QPen(self.line_color, 2)) + painter.setPen(QPen(self.line_color, 3)) for i in range(len(points) - 1): x1, y1 = points[i] x2, y2 = points[i + 1] painter.drawLine(x1, y1, x2, y2) - painter.setPen(QPen(self.spike_color, 3)) + + # Add subtle glow effect to the line + painter.setPen(QPen(QColor(self.line_color.red(), self.line_color.green(), self.line_color.blue(), 60), 6)) + for i in range(len(points) - 1): + x1, y1 = points[i] + x2, y2 = points[i + 1] + painter.drawLine(x1, y1, x2, y2) + + # Enhanced spike markers + painter.setPen(QPen(self.spike_color, 2)) painter.setBrush(QBrush(self.spike_color)) for x, y in spike_points: - painter.drawEllipse(x - 3, y - 3, 6, 6) + painter.drawEllipse(x - 4, y - 4, 8, 8) + # Add spike indicator line + painter.drawLine(x, y - 10, x, y + 10) + + # Enhanced device change indicators for x, y, device in device_changes: color = self.gpu_color if device == "GPU" else self.cpu_color - painter.setPen(QPen(color, 2)) - painter.setBrush(QBrush(color)) - painter.drawRect(x - 2, self.margin, 4, graph_height) + painter.setPen(QPen(color, 3)) + painter.setBrush(QBrush(QColor(color.red(), color.green(), color.blue(), 100))) + painter.drawRect(x - 3, margin, 6, graph_height) + + # Add device label + painter.setPen(color) + painter.setFont(QFont("Segoe UI", 8, QFont.Bold)) + painter.drawText(x - 10, margin - 5, device) + + # Enhanced resolution change indicators for x, y in res_changes: - painter.setPen(QPen(QColor(255, 167, 38), 2)) # Orange for resolution change - painter.drawLine(x, self.margin, x, height - self.margin) + painter.setPen(QPen(QColor(255, 193, 7), 2)) # Amber color + painter.drawLine(x, margin, x, height - margin) + + # Add resolution change marker + painter.setBrush(QBrush(QColor(255, 193, 7))) + painter.drawEllipse(x - 3, margin - 5, 6, 6) class PerformanceGraphsWidget(QWidget): + """Enhanced performance graphs widget with real-time data visualization""" + + # Define signals for better integration + performance_data_updated = Signal(dict) + spike_detected = Signal(dict) + device_switched = Signal(str) + def __init__(self): super().__init__() self.setup_ui() + + # Enhanced timer setup self.update_timer = QTimer() self.update_timer.timeout.connect(self.update_graphs) + self.system_timer = QTimer() + self.system_timer.timeout.connect(self.update_system_metrics) + try: - self.update_timer.start(1000) + self.update_timer.start(500) # Update graphs every 500ms for smoother animation + self.system_timer.start(1000) # 
Update system metrics every second except Exception as e: - print(f"❌ Error starting performance graph timer: {e}") - self.start_time = None + print(f"❌ Error starting performance graph timers: {e}") + + # Enhanced data tracking + self.start_time = time.time() if time else None self.latest_data = {} self.cpu_usage_history = deque(maxlen=300) + self.ram_usage_history = deque(maxlen=300) # Add missing ram_usage_history + self.frame_counter = 0 + self.spike_threshold = 100.0 # Default spike threshold in ms + self.previous_device = "CPU" # Track device changes + + # Performance statistics + self.latency_stats = { + 'avg': 0.0, + 'max': 0.0, + 'min': float('inf'), + 'spike_count': 0 + } + + def __del__(self): + """Clean up timers when widget is destroyed""" + try: + if hasattr(self, 'system_timer') and self.system_timer: + self.system_timer.stop() + self.system_timer.deleteLater() + if hasattr(self, 'update_timer') and self.update_timer: + self.update_timer.stop() + self.update_timer.deleteLater() + except: + pass + + def closeEvent(self, event): + """Handle widget close event""" + try: + if hasattr(self, 'system_timer') and self.system_timer: + self.system_timer.stop() + if hasattr(self, 'update_timer') and self.update_timer: + self.update_timer.stop() + except: + pass + super().closeEvent(event) self.ram_usage_history = deque(maxlen=300) + self.spike_threshold = 100 # ms threshold for latency spikes + self.previous_device = "CPU" + self.frame_counter = 0 + + # Performance statistics + self.latency_stats = { + 'avg': 0.0, + 'max': 0.0, + 'min': float('inf'), + 'spike_count': 0 + } + + self.setStyleSheet(""" + QWidget { + background-color: #121212; + color: #ffffff; + } + QLabel { + color: #ffffff; + background: transparent; + } + QFrame { + background-color: #1a1a1a; + border: 1px solid #333333; + border-radius: 8px; + margin: 2px; + } + """) + def setup_ui(self): - layout = QVBoxLayout(self) + # Create main layout + main_layout = QVBoxLayout(self) + main_layout.setContentsMargins(5, 5, 5, 5) + main_layout.setSpacing(0) + + # Create scroll area + scroll_area = QScrollArea() + scroll_area.setWidgetResizable(True) + scroll_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded) + scroll_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded) + scroll_area.setStyleSheet(""" + QScrollArea { + border: none; + background-color: #121212; + } + QScrollBar:vertical { + background-color: #2C2C2C; + width: 12px; + border-radius: 6px; + } + QScrollBar::handle:vertical { + background-color: #555555; + border-radius: 6px; + min-height: 20px; + } + QScrollBar::handle:vertical:hover { + background-color: #777777; + } + QScrollBar:horizontal { + background-color: #2C2C2C; + height: 12px; + border-radius: 6px; + } + QScrollBar::handle:horizontal { + background-color: #555555; + border-radius: 6px; + min-width: 20px; + } + QScrollBar::handle:horizontal:hover { + background-color: #777777; + } + """) + + # Create scrollable content widget + content_widget = QWidget() + content_layout = QVBoxLayout(content_widget) + content_layout.setContentsMargins(10, 10, 10, 10) + content_layout.setSpacing(8) + + # Enhanced title section + title_frame = QFrame() + title_layout = QVBoxLayout(title_frame) + title_label = QLabel("🔥 Real-Time Inference Performance & Latency Spike Analysis") - title_label.setStyleSheet("font-size: 16px; font-weight: bold; color: #FFD700; margin: 10px;") - layout.addWidget(title_label) + title_label.setStyleSheet(""" + font-size: 18px; + font-weight: bold; + color: #FFD700; + margin: 8px; + background: 
qlineargradient(x1:0, y1:0, x2:1, y2:0, + stop:0 #FFD700, stop:1 #FFA500); + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + """) + title_layout.addWidget(title_label) + + # Enhanced system stats + stats_layout = QHBoxLayout() + self.cpu_ram_stats = QLabel("CPU: 0% | RAM: 0%") - self.cpu_ram_stats.setStyleSheet("color: #FFD700; font-weight: bold; font-size: 14px; margin: 8px;") - layout.addWidget(self.cpu_ram_stats) + self.cpu_ram_stats.setStyleSheet(""" + color: #00FFFF; + font-weight: bold; + font-size: 14px; + margin: 4px 8px; + padding: 4px 8px; + background-color: rgba(0, 255, 255, 0.1); + border-radius: 4px; + """) + stats_layout.addWidget(self.cpu_ram_stats) + + # Add current model display + self.current_model_stats = QLabel("Model: Loading...") + self.current_model_stats.setStyleSheet(""" + color: #FFD700; + font-weight: bold; + font-size: 14px; + margin: 4px 8px; + padding: 4px 8px; + background-color: rgba(255, 215, 0, 0.1); + border-radius: 4px; + """) + stats_layout.addWidget(self.current_model_stats) + + title_layout.addLayout(stats_layout) + + title_frame.setLayout(title_layout) + content_layout.addWidget(title_frame) + + # Enhanced splitter for graphs - set minimum sizes to avoid cluttering splitter = QSplitter(Qt.Vertical) - # Latency graph + splitter.setStyleSheet(""" + QSplitter::handle { + background-color: #333333; + height: 3px; + } + QSplitter::handle:hover { + background-color: #555555; + } + """) + + # Enhanced Latency graph latency_frame = QFrame() + latency_frame.setMinimumHeight(250) # Set minimum height to prevent cluttering + latency_frame.setStyleSheet(""" + QFrame { + background: qlineargradient(x1:0, y1:0, x2:0, y2:1, + stop:0 rgba(30, 30, 35, 255), + stop:1 rgba(20, 20, 25, 255)); + border: 2px solid #00FFFF; + border-radius: 10px; + } + """) latency_layout = QVBoxLayout(latency_frame) + self.latency_graph = RealTimeGraph( "Inference Latency Over Time", "Latency (ms)", max_points=300 ) + self.latency_graph.setMinimumHeight(200) # Ensure minimum display height latency_layout.addWidget(self.latency_graph) + latency_info = QHBoxLayout() - self.latency_stats = QLabel("Avg: 0ms | Max: 0ms | Spikes: 0") - self.latency_stats.setStyleSheet("color: #00FFFF; font-weight: bold;") - latency_info.addWidget(self.latency_stats) + self.latency_stats_label = QLabel("Avg: 0ms | Max: 0ms | Spikes: 0") + self.latency_stats_label.setStyleSheet(""" + color: #00FFFF; + font-weight: bold; + font-size: 12px; + padding: 4px 8px; + background-color: rgba(0, 255, 255, 0.15); + border-radius: 4px; + margin: 4px; + """) + latency_info.addWidget(self.latency_stats_label) latency_info.addStretch() latency_layout.addLayout(latency_info) + latency_frame.setLayout(latency_layout) splitter.addWidget(latency_frame) - # FPS graph + + # Enhanced FPS graph fps_frame = QFrame() + fps_frame.setMinimumHeight(250) # Set minimum height to prevent cluttering + fps_frame.setStyleSheet(""" + QFrame { + background: qlineargradient(x1:0, y1:0, x2:0, y2:1, + stop:0 rgba(30, 35, 30, 255), + stop:1 rgba(20, 25, 20, 255)); + border: 2px solid #00FF00; + border-radius: 10px; + } + """) + fps_layout = QVBoxLayout(fps_frame) + + self.fps_graph = RealTimeGraph( + "FPS & Resolution Impact", + "FPS", + max_points=300 + ) + self.fps_graph.setMinimumHeight(200) # Ensure minimum display height + fps_layout.addWidget(self.fps_graph) + + fps_info = QHBoxLayout() + self.fps_stats = QLabel("Current FPS: 0 | Resolution: - | Device: -") + self.fps_stats.setStyleSheet(""" + color: #00FF00; + 
font-weight: bold; + font-size: 12px; + padding: 4px 8px; + background-color: rgba(0, 255, 0, 0.15); + border-radius: 4px; + margin: 4px; + """) + fps_info.addWidget(self.fps_stats) + fps_info.addStretch() + fps_layout.addLayout(fps_info) + + fps_frame.setLayout(fps_layout) + splitter.addWidget(fps_frame) + + # Enhanced Device switching & resolution changes graph + device_frame = QFrame() + device_frame.setMinimumHeight(220) # Set minimum height to prevent cluttering + device_frame.setStyleSheet(""" + QFrame { + background: qlineargradient(x1:0, y1:0, x2:0, y2:1, + stop:0 rgba(35, 30, 30, 255), + stop:1 rgba(25, 20, 20, 255)); + border: 2px solid #FFB300; + border-radius: 10px; + } + """) + device_layout = QVBoxLayout(device_frame) + + self.device_graph = RealTimeGraph( + "Device Switching & Resolution Changes", + "Events", + max_points=300 + ) + self.device_graph.setMinimumHeight(170) # Ensure minimum display height + device_layout.addWidget(self.device_graph) + + self.device_legend = QLabel( + " CPU Spikes: 0 | " + " GPU Spikes: 0 | " + " Switches: 0 | " + " Res Changes: 0" + ) + self.device_legend.setStyleSheet(""" + color: #FFB300; + font-size: 12px; + font-weight: bold; + margin: 4px 8px; + padding: 4px 8px; + background-color: rgba(255, 179, 0, 0.15); + border-radius: 4px; + """) + device_layout.addWidget(self.device_legend) + + device_frame.setLayout(device_layout) + splitter.addWidget(device_frame) + + # Set splitter proportions with minimum space for each section + splitter.setSizes([300, 300, 250]) # Increased minimum sizes + splitter.setChildrenCollapsible(False) # Prevent collapsing sections + + content_layout.addWidget(splitter) + content_widget.setLayout(content_layout) + + # Set minimum size for content widget to ensure scrolling when needed + content_widget.setMinimumSize(400, 850) # Minimum width and height + + # Add content widget to scroll area + scroll_area.setWidget(content_widget) + + # Add scroll area to main layout + main_layout.addWidget(scroll_area) + self.setLayout(main_layout) fps_layout = QVBoxLayout(fps_frame) self.fps_graph = RealTimeGraph( "FPS & Resolution Impact", @@ -222,33 +632,267 @@ class PerformanceGraphsWidget(QWidget): max_points=300 ) device_layout.addWidget(self.device_graph) - self.device_legend = QLabel("CPU Spikes: 0 | GPU Spikes: 0 | Switches: 0 | Res Changes: 0") - self.device_legend.setStyleSheet("color: #ffb300; font-size: 13px; font-weight: bold; margin: 2px 0 0 8px;") - device_layout.addWidget(self.device_legend) - device_frame.setLayout(device_layout) - splitter.addWidget(device_frame) - layout.addWidget(splitter) - self.setLayout(layout) + # Add scroll area to main layout + main_layout.addWidget(scroll_area) + self.setLayout(main_layout) + + @Slot() + def update_system_metrics(self): + """Update system CPU and RAM usage""" + try: + # Check if the widget is still valid and not being destroyed + if not self or not hasattr(self, 'isVisible') or not self.isVisible(): + return + + # Check if widgets still exist before updating + if not hasattr(self, 'cpu_ram_stats') or not self.cpu_ram_stats: + return + if not hasattr(self, 'device_graph') or not self.device_graph: + return + + # Check if the RealTimeGraph objects are still valid + try: + if hasattr(self.device_graph, 'add_data_point'): + # Test if the object is still valid by accessing a simple property + _ = self.device_graph.objectName() + else: + return + except RuntimeError: + # Object has been deleted + return + + if PSUTIL_AVAILABLE: + cpu_percent = psutil.cpu_percent(interval=None) + 
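The optional-psutil pattern used throughout this widget (guarded import at module load, fallback values when the package is missing) can be exercised on its own. A minimal sketch under that assumption; sample_system is an illustrative helper, not part of the patch:

    # Minimal sketch (illustrative, not part of the patch): optional psutil
    # with graceful fallback, mirroring the PSUTIL_AVAILABLE guard above.
    try:
        import psutil
        PSUTIL_AVAILABLE = True
    except ImportError:
        PSUTIL_AVAILABLE = False

    def sample_system():
        """Return (cpu_percent, ram_percent); zeros when psutil is unavailable."""
        if not PSUTIL_AVAILABLE:
            return 0.0, 0.0
        # interval=None reuses counters from the previous call, so the first
        # reading after import may be 0.0 - acceptable for a rolling graph.
        return psutil.cpu_percent(interval=None), psutil.virtual_memory().percent

    if __name__ == "__main__":
        cpu, ram = sample_system()
        print(f"CPU: {cpu:.1f}% | RAM: {ram:.1f}%")
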
memory = psutil.virtual_memory() + ram_percent = memory.percent + else: + # Fallback values when psutil is not available + cpu_percent = 0.0 + ram_percent = 0.0 + + if hasattr(self, 'cpu_usage_history'): + self.cpu_usage_history.append(cpu_percent) + if hasattr(self, 'ram_usage_history'): + self.ram_usage_history.append(ram_percent) + + # Update display + try: + if PSUTIL_AVAILABLE: + self.cpu_ram_stats.setText(f"CPU: {cpu_percent:.1f}% | RAM: {ram_percent:.1f}%") + else: + self.cpu_ram_stats.setText("CPU: -- | RAM: -- (monitoring unavailable)") + except RuntimeError: + # Widget has been deleted + return + + # Add CPU usage to device graph as background metric + try: + current_time = time.time() - self.start_time if self.start_time else 0 + self.device_graph.add_data_point(current_time, cpu_percent, device="System") + except RuntimeError: + # Graph has been deleted + return + + except Exception as e: + print(f"❌ Error updating system metrics: {e}") + # Fallback in case of any error + try: + if hasattr(self, 'cpu_ram_stats') and self.cpu_ram_stats: + self.cpu_ram_stats.setText("CPU: -- | RAM: -- (error)") + except: + pass + + @Slot() def update_graphs(self): - # Placeholder for updating graphs with new data - pass + """Update graphs with latest data""" + if not self.latest_data: + return + + try: + chart_data = self.latest_data.get('chart_data', {}) + latency_stats = self.latest_data.get('latency_stats', {}) + current_metrics = self.latest_data.get('current_metrics', {}) + + if not chart_data.get('timestamps'): + return + + # Get the latest data point + timestamps = chart_data.get('timestamps', []) + if not timestamps: + return + + latest_timestamp = timestamps[-1] + current_time = time.time() - self.start_time if self.start_time else latest_timestamp + + # Update latency graph + if 'inference_latency' in chart_data: + latency_values = chart_data['inference_latency'] + if latency_values: + latest_latency = latency_values[-1] + is_spike = latest_latency > self.spike_threshold + device = current_metrics.get('device', 'CPU') + + self.latency_graph.add_data_point( + current_time, + latest_latency, + is_spike=is_spike, + device=device + ) + + # Update latency statistics + self.latency_stats['max'] = max(self.latency_stats['max'], latest_latency) + self.latency_stats['min'] = min(self.latency_stats['min'], latest_latency) + if is_spike: + self.latency_stats['spike_count'] += 1 + # Emit spike signal + self.spike_detected.emit({ + 'latency': latest_latency, + 'timestamp': current_time, + 'device': device + }) + + # Calculate running average + if hasattr(self.latency_graph, 'y_data') and self.latency_graph.y_data: + self.latency_stats['avg'] = sum(self.latency_graph.y_data) / len(self.latency_graph.y_data) + + # Update FPS graph + if 'fps' in chart_data: + fps_values = chart_data['fps'] + if fps_values: + latest_fps = fps_values[-1] + device = current_metrics.get('device', 'CPU') + resolution = current_metrics.get('resolution', 'Unknown') + + # Check for device switch + device_switched = device != self.previous_device + if device_switched: + self.device_switched.emit(device) + self.previous_device = device + + self.fps_graph.add_data_point( + current_time, + latest_fps, + device=device, + is_res_change=False # Will be set by resolution change detection + ) + + # Update FPS stats display with model name + model_name = current_metrics.get('model', 'Unknown') + self.fps_stats.setText(f"Current FPS: {latest_fps:.1f} | Resolution: {resolution} | Device: {device} | Model: {model_name}") + + # Update 
device switching graph + device_usage = chart_data.get('device_usage', []) + if device_usage: + latest_usage = device_usage[-1] + device = current_metrics.get('device', 'CPU') + + self.device_graph.add_data_point( + current_time, + latest_usage * 100, # Convert to percentage + device=device + ) + + # Update statistics displays + self.latency_stats_label.setText( + f"Avg: {self.latency_stats['avg']:.1f}ms | " + f"Max: {self.latency_stats['max']:.1f}ms | " + f"Spikes: {self.latency_stats['spike_count']}" + ) + + # Update device legend + self.device_legend.setText( + f" CPU Spikes: {self.latency_graph.spike_count} | " + f" GPU Spikes: {self.device_graph.spike_count} | " + f" Switches: {self.device_graph.device_switches} | " + f" Res Changes: {self.device_graph.resolution_changes}" + ) + + # Update current model display + model_name = current_metrics.get('model', 'Unknown') + device = current_metrics.get('device', 'Unknown') + if hasattr(self, 'current_model_stats'): + self.current_model_stats.setText(f"Model: {model_name} | Device: {device}") + + self.frame_counter += 1 + + except Exception as e: + print(f"❌ Error updating performance graphs: {e}") + def update_performance_data(self, analytics_data: Dict[str, Any]): """Update graphs with new analytics data, including system metrics""" try: print(f"[PERF DEBUG] update_performance_data called with: {analytics_data}") + + # Initialize start time if not set + if self.start_time is None: + self.start_time = time.time() + chart_data = analytics_data.get('real_time_data', {}) latency_stats = analytics_data.get('latency_statistics', {}) current_metrics = analytics_data.get('current_metrics', {}) system_metrics = analytics_data.get('system_metrics', {}) + if not chart_data.get('timestamps'): print("[PERF DEBUG] No timestamps in chart_data") return + self.latest_data = { 'chart_data': chart_data, 'latency_stats': latency_stats, 'current_metrics': current_metrics, 'system_metrics': system_metrics } - self.update_graphs() # Immediately update graphs on new data + + # Emit signal for other components + self.performance_data_updated.emit(analytics_data) + + # Immediately update graphs on new data + self.update_graphs() + except Exception as e: print(f"❌ Error updating performance data: {e}") + + def clear_all_graphs(self): + """Clear all graph data""" + try: + self.latency_graph.clear_data() + self.fps_graph.clear_data() + self.device_graph.clear_data() + + # Reset statistics + self.latency_stats = { + 'avg': 0.0, + 'max': 0.0, + 'min': float('inf'), + 'spike_count': 0 + } + + self.frame_counter = 0 + self.start_time = time.time() + + # Update displays + self.latency_stats_label.setText("Avg: 0ms | Max: 0ms | Spikes: 0") + self.fps_stats.setText("Current FPS: 0 | Resolution: - | Device: -") + self.device_legend.setText( + " CPU Spikes: 0 | " + " GPU Spikes: 0 | " + " Switches: 0 | " + " Res Changes: 0" + ) + + except Exception as e: + print(f"❌ Error clearing graphs: {e}") + + def set_spike_threshold(self, threshold: float): + """Set the threshold for detecting latency spikes""" + self.spike_threshold = threshold + + def get_performance_summary(self) -> Dict[str, Any]: + """Get a summary of current performance metrics""" + return { + 'latency_stats': self.latency_stats.copy(), + 'frame_count': self.frame_counter, + 'cpu_usage': list(self.cpu_usage_history), + 'ram_usage': list(self.ram_usage_history), + 'current_device': self.previous_device + } diff --git a/qt_app_pyside1/ui/video_detection_tab.py b/qt_app_pyside1/ui/video_detection_tab.py index 
58ec501..ca0f642 100644 --- a/qt_app_pyside1/ui/video_detection_tab.py +++ b/qt_app_pyside1/ui/video_detection_tab.py @@ -1,36 +1,502 @@ -from PySide6.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QSlider, QCheckBox, QFileDialog, QSizePolicy, QGridLayout, QFrame, QSpacerItem +from PySide6.QtWidgets import ( + QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QSlider, QCheckBox, + QFileDialog, QSizePolicy, QFrame, QTabWidget, QGridLayout, QComboBox, + QListWidget, QListWidgetItem, QGroupBox, QScrollArea +) from PySide6.QtCore import Signal, Qt -from PySide6.QtGui import QPixmap, QIcon, QFont +from PySide6.QtGui import QPixmap, QIcon +import json +import os +from pathlib import Path -class DiagnosticOverlay(QFrame): - """Semi-transparent overlay for diagnostics.""" +class SmartIntersectionOverlay(QFrame): + """Advanced overlay for Smart Intersection analytics.""" def __init__(self, parent=None): super().__init__(parent) self.setStyleSheet(""" - background: rgba(0,0,0,0.5); - border-radius: 8px; + background: rgba(0,20,40,0.85); + border: 2px solid #03DAC5; + border-radius: 12px; color: #fff; font-family: 'Consolas', 'SF Mono', 'monospace'; - font-size: 13px; + font-size: 12px; """) - # self.setFixedWidth(260) # Remove fixed width - self.setFixedHeight(90) - self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) # Allow horizontal stretch + self.setFixedHeight(140) + self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) self.setAttribute(Qt.WA_TransparentForMouseEvents) + layout = QVBoxLayout(self) - layout.setContentsMargins(12, 8, 12, 8) - self.model_label = QLabel("Model: -") - self.device_label = QLabel("Device: -") - self.stats_label = QLabel("Cars: 0 | Trucks: 0 | Ped: 0 | TLights: 0 | Moto: 0") - for w in [self.model_label, self.device_label, self.stats_label]: + layout.setContentsMargins(16, 12, 16, 12) + layout.setSpacing(4) + + # Title + title = QLabel("🚦 Smart Intersection Analytics") + title.setStyleSheet("color: #03DAC5; font-weight: bold; font-size: 14px;") + layout.addWidget(title) + + # Scene data + self.scene_label = QLabel("Scene: Multi-Camera Fusion") + self.tracking_label = QLabel("Active Tracks: 0") + self.roi_label = QLabel("ROI Events: 0") + + # Camera data + self.camera_label = QLabel("Cameras: North(0) East(0) South(0) West(0)") + + # Analytics data + self.analytics_label = QLabel("Analytics: Crosswalk(0) Lane(0) Safety(0)") + + for w in [self.scene_label, self.tracking_label, self.roi_label, + self.camera_label, self.analytics_label]: w.setStyleSheet("color: #fff;") layout.addWidget(w) - layout.addStretch(1) + + def update_smart_intersection(self, scene_data): + """Update smart intersection specific data""" + if not scene_data: + return + + # Update tracking info + active_tracks = scene_data.get('active_tracks', 0) + self.tracking_label.setText(f"Active Tracks: {active_tracks}") + + # Update ROI events + roi_events = scene_data.get('roi_events', 0) + self.roi_label.setText(f"ROI Events: {roi_events}") + + # Update camera data + cameras = scene_data.get('cameras', {}) + north = cameras.get('north', 0) + east = cameras.get('east', 0) + south = cameras.get('south', 0) + west = cameras.get('west', 0) + self.camera_label.setText(f"Cameras: North({north}) East({east}) South({south}) West({west})") + + # Update analytics + analytics = scene_data.get('analytics', {}) + crosswalk = analytics.get('crosswalk_events', 0) + lane = analytics.get('lane_events', 0) + safety = analytics.get('safety_events', 0) + 
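The scene_data payload that SmartIntersectionOverlay.update_smart_intersection expects can be summarized as a plain dictionary. A minimal sketch with made-up example values; only the key names are taken from the method in this hunk:

    # Minimal sketch (example values are made up): the payload consumed by
    # update_smart_intersection. Missing keys fall back to 0 via dict.get,
    # so a partial payload is also accepted.
    example_scene_data = {
        "active_tracks": 14,
        "roi_events": 3,
        "cameras": {"north": 4, "east": 2, "south": 5, "west": 3},
        "analytics": {"crosswalk_events": 1, "lane_events": 2, "safety_events": 0},
    }

    if __name__ == "__main__":
        cams = example_scene_data["cameras"]
        print("Cameras: North({north}) East({east}) South({south}) West({west})".format(**cams))
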
self.analytics_label.setText(f"Analytics: Crosswalk({crosswalk}) Lane({lane}) Safety({safety})") + + +class IntersectionROIWidget(QFrame): + """Widget for defining and managing ROI regions for smart intersection""" + roi_updated = Signal(dict) + + def __init__(self, parent=None): + super().__init__(parent) + self.setStyleSheet(""" + QFrame { + background: #1a1a1a; + border: 1px solid #424242; + border-radius: 8px; + } + """) + self.setFixedWidth(300) + + layout = QVBoxLayout(self) + layout.setContentsMargins(16, 16, 16, 16) + + # Title + title = QLabel("🎯 Region of Interest (ROI)") + title.setStyleSheet("color: #03DAC5; font-weight: bold; font-size: 14px;") + layout.addWidget(title) + + # ROI Type selection + type_layout = QHBoxLayout() + type_layout.addWidget(QLabel("Type:")) + self.roi_type = QComboBox() + self.roi_type.addItems(["Crosswalk", "Traffic Lane", "Safety Zone", "Intersection Center"]) + type_layout.addWidget(self.roi_type) + layout.addLayout(type_layout) + + # ROI List + self.roi_list = QListWidget() + self.roi_list.setMaximumHeight(120) + layout.addWidget(self.roi_list) + + # ROI Controls + roi_controls = QHBoxLayout() + self.add_roi_btn = QPushButton("Add ROI") + self.delete_roi_btn = QPushButton("Delete") + self.add_roi_btn.setStyleSheet("background: #27ae60; color: white; border-radius: 4px; padding: 6px;") + self.delete_roi_btn.setStyleSheet("background: #e74c3c; color: white; border-radius: 4px; padding: 6px;") + roi_controls.addWidget(self.add_roi_btn) + roi_controls.addWidget(self.delete_roi_btn) + layout.addLayout(roi_controls) + + # Analytics settings + analytics_group = QGroupBox("Analytics Settings") + analytics_layout = QVBoxLayout(analytics_group) + + self.enable_tracking = QCheckBox("Multi-Object Tracking") + self.enable_speed = QCheckBox("Speed Estimation") + self.enable_direction = QCheckBox("Direction Analysis") + self.enable_safety = QCheckBox("Safety Monitoring") + + for cb in [self.enable_tracking, self.enable_speed, self.enable_direction, self.enable_safety]: + cb.setChecked(True) + cb.setStyleSheet("color: white;") + analytics_layout.addWidget(cb) + + layout.addWidget(analytics_group) + + # Connect signals + self.add_roi_btn.clicked.connect(self._add_roi) + self.delete_roi_btn.clicked.connect(self._delete_roi) + + # Initialize with default ROIs + self._init_default_rois() + + def _init_default_rois(self): + """Initialize with default intersection ROIs""" + default_rois = [ + "North Crosswalk", + "South Crosswalk", + "East Crosswalk", + "West Crosswalk", + "Center Intersection", + "North Lane", + "South Lane", + "East Lane", + "West Lane" + ] + + for roi in default_rois: + item = QListWidgetItem(roi) + item.setFlags(item.flags() | Qt.ItemIsUserCheckable) + item.setCheckState(Qt.Checked) + self.roi_list.addItem(item) + + def _add_roi(self): + """Add new ROI""" + roi_type = self.roi_type.currentText() + roi_name = f"{roi_type}_{self.roi_list.count() + 1}" + + item = QListWidgetItem(roi_name) + item.setFlags(item.flags() | Qt.ItemIsUserCheckable) + item.setCheckState(Qt.Checked) + self.roi_list.addItem(item) + + self._emit_roi_update() + + def _delete_roi(self): + """Delete selected ROI""" + current_row = self.roi_list.currentRow() + if current_row >= 0: + self.roi_list.takeItem(current_row) + self._emit_roi_update() + + def _emit_roi_update(self): + """Emit ROI configuration update""" + roi_config = { + 'rois': [], + 'analytics': { + 'tracking': self.enable_tracking.isChecked(), + 'speed': self.enable_speed.isChecked(), + 'direction': 
self.enable_direction.isChecked(), + 'safety': self.enable_safety.isChecked() + } + } + + for i in range(self.roi_list.count()): + item = self.roi_list.item(i) + roi_config['rois'].append({ + 'name': item.text(), + 'enabled': item.checkState() == Qt.Checked + }) + + self.roi_updated.emit(roi_config) + + +class MultiCameraView(QFrame): + """Multi-camera view for smart intersection""" + def __init__(self, parent=None): + super().__init__(parent) + self.setStyleSheet(""" + QFrame { + background: #0a0a0a; + border: 2px solid #424242; + border-radius: 8px; + } + """) + + layout = QGridLayout(self) + layout.setContentsMargins(8, 8, 8, 8) + layout.setSpacing(4) + + # Create camera views + self.camera_views = {} + positions = [('North', 0, 1), ('West', 1, 0), ('East', 1, 2), ('South', 2, 1)] + + for pos_name, row, col in positions: + view = self._create_camera_view(pos_name) + self.camera_views[pos_name.lower()] = view + layout.addWidget(view, row, col) + + # Center intersection view + center_view = self._create_intersection_center() + layout.addWidget(center_view, 1, 1) + + def _create_camera_view(self, position): + """Create individual camera view""" + view = QFrame() + view.setStyleSheet(""" + background: #1a1a1a; + border: 1px solid #555; + border-radius: 4px; + """) + view.setMinimumSize(160, 120) + view.setMaximumSize(200, 150) + + layout = QVBoxLayout(view) + layout.setContentsMargins(4, 4, 4, 4) + + # Title + title = QLabel(f"📹 {position}") + title.setStyleSheet("color: #03DAC5; font-weight: bold; font-size: 10px;") + title.setAlignment(Qt.AlignCenter) + layout.addWidget(title) + + # Video area + video_area = QLabel("No feed") + video_area.setStyleSheet("background: #000; color: #666; border: 1px dashed #333;") + video_area.setAlignment(Qt.AlignCenter) + video_area.setMinimumHeight(80) + layout.addWidget(video_area) + + # Stats + stats = QLabel("Objects: 0") + stats.setStyleSheet("color: #aaa; font-size: 9px;") + stats.setAlignment(Qt.AlignCenter) + layout.addWidget(stats) + + return view + + def _create_intersection_center(self): + """Create center intersection overview""" + view = QFrame() + view.setStyleSheet(""" + background: #2a1a1a; + border: 2px solid #03DAC5; + border-radius: 8px; + """) + view.setMinimumSize(160, 120) + view.setMaximumSize(200, 150) + + layout = QVBoxLayout(view) + layout.setContentsMargins(8, 8, 8, 8) + + title = QLabel("🚦 Intersection") + title.setStyleSheet("color: #03DAC5; font-weight: bold; font-size: 12px;") + title.setAlignment(Qt.AlignCenter) + layout.addWidget(title) + + # Intersection map + map_area = QLabel("Scene Map") + map_area.setStyleSheet("background: #000; color: #03DAC5; border: 1px solid #03DAC5;") + map_area.setAlignment(Qt.AlignCenter) + map_area.setMinimumHeight(80) + layout.addWidget(map_area) + + # Total stats + total_stats = QLabel("Total Objects: 0") + total_stats.setStyleSheet("color: #03DAC5; font-size: 10px; font-weight: bold;") + total_stats.setAlignment(Qt.AlignCenter) + layout.addWidget(total_stats) + + return view + + def update_camera_feed(self, camera_position, pixmap, object_count=0): + """Update specific camera feed""" + if camera_position.lower() in self.camera_views: + view = self.camera_views[camera_position.lower()] + video_label = view.findChild(QLabel) + if video_label and pixmap: + scaled = pixmap.scaled(video_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation) + video_label.setPixmap(scaled) + + # Update stats + stats_labels = view.findChildren(QLabel) + if len(stats_labels) >= 3: # title, video, stats + 
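The 3x3 placement that MultiCameraView builds (four compass-point cameras around a fused centre view) is easier to see as data. A minimal sketch, useful when wiring controller output to update_camera_feed(position, pixmap, count); GRID and cell_for are illustrative names, not part of the patch:

    # Minimal sketch (not part of the patch): the grid cells MultiCameraView
    # assigns to each camera position, with the scene-fusion view in the centre.
    GRID = {
        "north":  (0, 1),
        "west":   (1, 0),
        "center": (1, 1),   # intersection overview, not a camera feed
        "east":   (1, 2),
        "south":  (2, 1),
    }

    def cell_for(position: str):
        """Return the (row, column) a camera position occupies, or None."""
        return GRID.get(position.lower())

    if __name__ == "__main__":
        print(cell_for("North"))   # (0, 1)
        print(cell_for("gantry"))  # None -> caller should ignore unknown feeds
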
stats_labels[2].setText(f"Objects: {object_count}") + + +class EnhancedPerformanceOverlay(QFrame): + """Enhanced performance metrics overlay with traffic light status.""" + def __init__(self, parent=None): + super().__init__(parent) + self.setStyleSheet(""" + QFrame { + background: rgba(20, 30, 40, 0.95); + border: 2px solid #03DAC5; + border-radius: 12px; + color: #fff; + font-family: 'Segoe UI', 'Arial', sans-serif; + } + """) + self.setFixedHeight(140) + self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) + self.setAttribute(Qt.WA_TransparentForMouseEvents) + + layout = QVBoxLayout(self) + layout.setContentsMargins(16, 12, 16, 12) + layout.setSpacing(8) + + # Title row + title_layout = QHBoxLayout() + title = QLabel("📊 Real-time Performance Metrics") + title.setStyleSheet(""" + color: #03DAC5; + font-weight: bold; + font-size: 14px; + margin-bottom: 4px; + """) + title_layout.addWidget(title) + title_layout.addStretch() + + # Traffic light status + self.traffic_light_status = QLabel("🚦 Traffic: Unknown") + self.traffic_light_status.setStyleSheet(""" + color: #FFD700; + font-weight: bold; + font-size: 13px; + background: rgba(0,0,0,0.3); + padding: 4px 8px; + border-radius: 6px; + """) + title_layout.addWidget(self.traffic_light_status) + layout.addLayout(title_layout) + + # Performance metrics row + perf_layout = QHBoxLayout() + perf_layout.setSpacing(16) + + # FPS and Inference in badges + self.fps_label = QLabel("FPS: --") + self.fps_label.setStyleSheet(""" + background: #27AE60; + color: white; + font-weight: bold; + font-size: 13px; + padding: 6px 12px; + border-radius: 8px; + min-width: 70px; + """) + self.fps_label.setAlignment(Qt.AlignCenter) + + self.inference_label = QLabel("Inference: -- ms") + self.inference_label.setStyleSheet(""" + background: #3498DB; + color: white; + font-weight: bold; + font-size: 13px; + padding: 6px 12px; + border-radius: 8px; + min-width: 110px; + """) + self.inference_label.setAlignment(Qt.AlignCenter) + + perf_layout.addWidget(self.fps_label) + perf_layout.addWidget(self.inference_label) + perf_layout.addStretch() + layout.addLayout(perf_layout) + + # System info row + system_layout = QHBoxLayout() + self.model_label = QLabel("Model: -") + self.model_label.setStyleSheet(""" + color: #E74C3C; + font-weight: bold; + font-size: 12px; + background: rgba(231, 76, 60, 0.1); + padding: 4px 8px; + border-radius: 6px; + """) + + self.device_label = QLabel("Device: -") + self.device_label.setStyleSheet(""" + color: #9B59B6; + font-weight: bold; + font-size: 12px; + background: rgba(155, 89, 182, 0.1); + padding: 4px 8px; + border-radius: 6px; + """) + + system_layout.addWidget(self.model_label) + system_layout.addWidget(self.device_label) + system_layout.addStretch() + layout.addLayout(system_layout) + + # Vehicle counts row + self.vehicle_stats_label = QLabel("🚗 Vehicles: 0 | 🚛 Trucks: 0 | 🚶 Pedestrians: 0 | 🏍️ Motorcycles: 0") + self.vehicle_stats_label.setStyleSheet(""" + color: #F39C12; + font-weight: bold; + font-size: 12px; + background: rgba(243, 156, 18, 0.1); + padding: 6px 10px; + border-radius: 6px; + """) + layout.addWidget(self.vehicle_stats_label) def update_overlay(self, model, device, cars, trucks, peds, tlights, motorcycles): + """Update performance metrics""" self.model_label.setText(f"Model: {model}") self.device_label.setText(f"Device: {device}") - self.stats_label.setText(f"Cars: {cars} | Trucks: {trucks} | Ped: {peds} | TLights: {tlights} | Moto: {motorcycles}") + self.vehicle_stats_label.setText(f"🚗 Vehicles: {cars} | 🚛 
Trucks: {trucks} | 🚶 Pedestrians: {peds} | 🏍️ Motorcycles: {motorcycles}") + + def update_performance_metrics(self, fps, inference_time): + """Update FPS and inference time""" + if fps is not None: + self.fps_label.setText(f"FPS: {fps:.1f}") + else: + self.fps_label.setText("FPS: --") + + if inference_time is not None: + self.inference_label.setText(f"Inference: {inference_time:.1f} ms") + else: + self.inference_label.setText("Inference: -- ms") + + def update_traffic_light_status(self, traffic_light_data): + """Update traffic light status""" + if traffic_light_data and isinstance(traffic_light_data, dict): + color = traffic_light_data.get('color', 'unknown') + confidence = traffic_light_data.get('confidence', 0) + + if color.lower() == 'red': + icon = "🔴" + text_color = "#E74C3C" + elif color.lower() == 'yellow': + icon = "🟡" + text_color = "#F39C12" + elif color.lower() == 'green': + icon = "🟢" + text_color = "#27AE60" + else: + icon = "⚫" + text_color = "#95A5A6" + + self.traffic_light_status.setText(f"{icon} Traffic: {color.title()} ({confidence:.2f})") + self.traffic_light_status.setStyleSheet(f""" + color: {text_color}; + font-weight: bold; + font-size: 13px; + background: rgba(0,0,0,0.3); + padding: 4px 8px; + border-radius: 6px; + """) + else: + self.traffic_light_status.setText("🚦 Traffic: Unknown") + self.traffic_light_status.setStyleSheet(""" + color: #95A5A6; + font-weight: bold; + font-size: 13px; + background: rgba(0,0,0,0.3); + padding: 4px 8px; + border-radius: 6px; + """) class VideoDetectionTab(QWidget): file_selected = Signal(str) @@ -40,16 +506,202 @@ class VideoDetectionTab(QWidget): detection_toggled = Signal(bool) screenshot_clicked = Signal() seek_changed = Signal(int) - auto_select_model_device = Signal() # New signal for auto model/device selection + auto_select_model_device = Signal() + + # Smart Intersection signals + smart_intersection_enabled = Signal(bool) + multi_camera_mode_enabled = Signal(bool) + roi_configuration_changed = Signal(dict) + scene_analytics_toggled = Signal(bool) def __init__(self): super().__init__() self.video_loaded = False - grid = QGridLayout(self) - grid.setContentsMargins(32, 24, 32, 24) - grid.setSpacing(0) - # File select bar (top) - file_bar = QHBoxLayout() + self.smart_intersection_mode = False + self.multi_camera_mode = False + + # Load smart intersection config + self.load_smart_intersection_config() + + # Main layout + main_layout = QHBoxLayout(self) + main_layout.setContentsMargins(16, 16, 16, 16) + main_layout.setSpacing(16) + + # Left panel - video and controls + left_panel = self._create_left_panel() + main_layout.addWidget(left_panel, 3) # 3/4 of the space + + # Right panel - smart intersection controls + right_panel = self._create_right_panel() + main_layout.addWidget(right_panel, 1) # 1/4 of the space + + def load_smart_intersection_config(self): + """Load smart intersection configuration""" + config_path = Path(__file__).parent.parent / "config" / "smart-intersection" / "desktop-config.json" + try: + if config_path.exists(): + with open(config_path, 'r') as f: + self.smart_config = json.load(f) + else: + self.smart_config = self._get_default_config() + except Exception as e: + print(f"Error loading smart intersection config: {e}") + self.smart_config = self._get_default_config() + + def _get_default_config(self): + """Get default smart intersection configuration""" + return { + "desktop_app_config": { + "scene_analytics": { + "enable_multi_camera": True, + "enable_roi_analytics": True, + "enable_vlm_integration": True + 
}, + "camera_settings": { + "max_cameras": 4, + "default_fps": 30 + }, + "analytics_settings": { + "object_tracking": True, + "speed_estimation": True, + "direction_analysis": True, + "safety_monitoring": True + } + } + } + + def _create_left_panel(self): + """Create main video panel""" + panel = QWidget() + layout = QVBoxLayout(panel) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(16) + + # Smart Intersection Mode Toggle + mode_bar = self._create_mode_bar() + layout.addWidget(mode_bar) + + # File select bar + file_bar = self._create_file_bar() + layout.addWidget(file_bar) + + # Video display area (with tabs for different modes) + self.video_tabs = QTabWidget() + self.video_tabs.setStyleSheet(""" + QTabWidget::pane { + border: 1px solid #424242; + background: #121212; + border-radius: 8px; + } + QTabBar::tab { + background: #232323; + color: #fff; + padding: 8px 16px; + margin-right: 2px; + border-top-left-radius: 8px; + border-top-right-radius: 8px; + } + QTabBar::tab:selected { + background: #03DAC5; + color: #000; + } + """) + + # Single camera tab + self.single_cam_widget = self._create_single_camera_view() + self.video_tabs.addTab(self.single_cam_widget, "📹 Single Camera") + + # Multi-camera tab + self.multi_cam_widget = MultiCameraView() + self.video_tabs.addTab(self.multi_cam_widget, "🚦 Multi-Camera Intersection") + + layout.addWidget(self.video_tabs) + + # Analytics overlay + self.analytics_overlay = self._create_analytics_overlay() + layout.addWidget(self.analytics_overlay) + + # Control bar + control_bar = self._create_control_bar() + layout.addWidget(control_bar) + + return panel + + def _create_mode_bar(self): + """Create smart intersection mode toggle bar""" + bar = QFrame() + bar.setStyleSheet(""" + QFrame { + background: #1a2332; + border: 2px solid #03DAC5; + border-radius: 12px; + padding: 8px; + } + """) + bar.setFixedHeight(60) + + layout = QHBoxLayout(bar) + layout.setContentsMargins(16, 8, 16, 8) + + # Smart Intersection Toggle + self.smart_intersection_toggle = QCheckBox("🚦 Smart Intersection Mode") + self.smart_intersection_toggle.setStyleSheet(""" + QCheckBox { + color: #03DAC5; + font-weight: bold; + font-size: 14px; + } + QCheckBox::indicator { + width: 20px; + height: 20px; + } + QCheckBox::indicator:checked { + background: #03DAC5; + border: 2px solid #03DAC5; + border-radius: 4px; + } + """) + self.smart_intersection_toggle.toggled.connect(self._toggle_smart_intersection) + layout.addWidget(self.smart_intersection_toggle) + + layout.addSpacing(32) + + # Multi-camera Toggle + self.multi_camera_toggle = QCheckBox("📹 Multi-Camera Fusion") + self.multi_camera_toggle.setStyleSheet(""" + QCheckBox { + color: #e67e22; + font-weight: bold; + font-size: 14px; + } + QCheckBox::indicator { + width: 20px; + height: 20px; + } + QCheckBox::indicator:checked { + background: #e67e22; + border: 2px solid #e67e22; + border-radius: 4px; + } + """) + self.multi_camera_toggle.toggled.connect(self._toggle_multi_camera) + layout.addWidget(self.multi_camera_toggle) + + layout.addStretch() + + # Status indicator + self.mode_status = QLabel("Standard Detection Mode") + self.mode_status.setStyleSheet("color: #bbb; font-size: 12px;") + layout.addWidget(self.mode_status) + + return bar + + def _create_file_bar(self): + """Create file selection bar""" + widget = QWidget() + bar = QHBoxLayout(widget) + self.file_btn = QPushButton() self.file_btn.setIcon(QIcon.fromTheme("folder-video")) self.file_btn.setText("Select Video") @@ -57,10 +709,20 @@ class VideoDetectionTab(QWidget): 
self.file_label = QLabel("No file selected") self.file_label.setStyleSheet("color: #bbb; font-size: 13px;") self.file_btn.clicked.connect(self._select_file) - file_bar.addWidget(self.file_btn) - file_bar.addWidget(self.file_label) - file_bar.addStretch(1) - # Video display area (centered, scalable) + + bar.addWidget(self.file_btn) + bar.addWidget(self.file_label) + bar.addStretch() + + return widget + + def _create_single_camera_view(self): + """Create single camera view widget""" + widget = QWidget() + layout = QVBoxLayout(widget) + layout.setContentsMargins(0, 0, 0, 0) + + # Video frame video_frame = QFrame() video_frame.setStyleSheet(""" background: #121212; @@ -69,9 +731,11 @@ class VideoDetectionTab(QWidget): """) video_frame.setMinimumSize(640, 360) video_frame.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) + video_layout = QVBoxLayout(video_frame) video_layout.setContentsMargins(0, 0, 0, 0) video_layout.setAlignment(Qt.AlignCenter) + self.video_label = QLabel() self.video_label.setAlignment(Qt.AlignCenter) self.video_label.setStyleSheet("background: transparent; color: #888; font-size: 18px;") @@ -79,31 +743,54 @@ class VideoDetectionTab(QWidget): self.video_label.setMinimumSize(640, 360) self.video_label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) video_layout.addWidget(self.video_label) - # Diagnostic overlay (now below video, not over it) - self.overlay = DiagnosticOverlay() - self.overlay.setStyleSheet(self.overlay.styleSheet() + "border: 1px solid #03DAC5;") - self.overlay.setFixedHeight(90) - # FPS and Inference badges (below video) + + layout.addWidget(video_frame) + return widget + + def _create_analytics_overlay(self): + """Create analytics overlay that switches based on mode""" + container = QWidget() + self.overlay_layout = QVBoxLayout(container) + self.overlay_layout.setContentsMargins(0, 0, 0, 0) + + # Standard overlay + self.standard_overlay = EnhancedPerformanceOverlay() + self.standard_overlay.setStyleSheet(self.standard_overlay.styleSheet() + "border: 1px solid #03DAC5;") + + # Smart intersection overlay + self.smart_overlay = SmartIntersectionOverlay() + + # Badge bar + self.badge_bar = QHBoxLayout() + self.badge_bar.setContentsMargins(0, 8, 0, 8) + self.fps_badge = QLabel("FPS: --") self.fps_badge.setStyleSheet("background: #27ae60; color: #fff; border-radius: 12px; padding: 4px 24px; font-weight: bold; font-size: 15px;") self.fps_badge.setAlignment(Qt.AlignCenter) + self.inference_badge = QLabel("Inference: -- ms") self.inference_badge.setStyleSheet("background: #2980b9; color: #fff; border-radius: 12px; padding: 4px 24px; font-weight: bold; font-size: 15px;") self.inference_badge.setAlignment(Qt.AlignCenter) - # Horizontal layout for overlay and badges - self.badge_bar = QHBoxLayout() - self.badge_bar.setContentsMargins(0, 8, 0, 8) + self.badge_bar.addWidget(self.fps_badge) self.badge_bar.addSpacing(12) self.badge_bar.addWidget(self.inference_badge) self.badge_bar.addSpacing(18) - self.badge_bar.addWidget(self.overlay) # Overlay will stretch to fill right side - self.badge_bar.addStretch(10) - video_layout.addStretch(1) # Push badge bar to the bottom - video_layout.addLayout(self.badge_bar) - # Control bar (bottom) - control_bar = QHBoxLayout() + + # Add current overlay (start with standard) + self.current_overlay = self.standard_overlay + self.badge_bar.addWidget(self.current_overlay) + self.badge_bar.addStretch() + + self.overlay_layout.addLayout(self.badge_bar) + return container + + def _create_control_bar(self): + """Create 
control bar""" + widget = QWidget() + control_bar = QHBoxLayout(widget) control_bar.setContentsMargins(0, 16, 0, 0) + # Playback controls self.play_btn = QPushButton() self.play_btn.setIcon(QIcon.fromTheme("media-playback-start")) @@ -111,24 +798,30 @@ class VideoDetectionTab(QWidget): self.play_btn.setFixedSize(48, 48) self.play_btn.setEnabled(False) self.play_btn.setStyleSheet(self._button_style()) + self.pause_btn = QPushButton() self.pause_btn.setIcon(QIcon.fromTheme("media-playback-pause")) self.pause_btn.setToolTip("Pause") self.pause_btn.setFixedSize(48, 48) self.pause_btn.setEnabled(False) self.pause_btn.setStyleSheet(self._button_style()) + self.stop_btn = QPushButton() self.stop_btn.setIcon(QIcon.fromTheme("media-playback-stop")) self.stop_btn.setToolTip("Stop") self.stop_btn.setFixedSize(48, 48) self.stop_btn.setEnabled(False) self.stop_btn.setStyleSheet(self._button_style()) - for btn, sig in zip([self.play_btn, self.pause_btn, self.stop_btn], [self.play_clicked.emit, self.pause_clicked.emit, self.stop_clicked.emit]): + + for btn, sig in zip([self.play_btn, self.pause_btn, self.stop_btn], + [self.play_clicked.emit, self.pause_clicked.emit, self.stop_clicked.emit]): btn.clicked.connect(sig) + control_bar.addWidget(self.play_btn) control_bar.addWidget(self.pause_btn) control_bar.addWidget(self.stop_btn) control_bar.addSpacing(16) + # Progress bar self.progress = QSlider(Qt.Horizontal) self.progress.setStyleSheet("QSlider::groove:horizontal { height: 6px; background: #232323; border-radius: 3px; } QSlider::handle:horizontal { background: #03DAC5; border-radius: 8px; width: 18px; }") @@ -136,10 +829,12 @@ class VideoDetectionTab(QWidget): self.progress.setEnabled(False) self.progress.valueChanged.connect(self.seek_changed.emit) control_bar.addWidget(self.progress, 2) + self.timestamp = QLabel("00:00 / 00:00") self.timestamp.setStyleSheet("color: #bbb; font-size: 13px;") control_bar.addWidget(self.timestamp) control_bar.addSpacing(16) + # Detection toggle & screenshot self.detection_toggle = QCheckBox("Enable Detection") self.detection_toggle.setChecked(True) @@ -147,6 +842,7 @@ class VideoDetectionTab(QWidget): self.detection_toggle.setEnabled(False) self.detection_toggle.toggled.connect(self.detection_toggled.emit) control_bar.addWidget(self.detection_toggle) + self.screenshot_btn = QPushButton() self.screenshot_btn.setIcon(QIcon.fromTheme("camera-photo")) self.screenshot_btn.setText("Screenshot") @@ -155,15 +851,157 @@ class VideoDetectionTab(QWidget): self.screenshot_btn.setStyleSheet(self._button_style()) self.screenshot_btn.clicked.connect(self.screenshot_clicked.emit) control_bar.addWidget(self.screenshot_btn) - control_bar.addStretch(1) - # Layout grid - grid.addLayout(file_bar, 0, 0, 1, 1) - grid.addWidget(video_frame, 1, 0, 1, 1) - grid.addLayout(self.badge_bar, 2, 0, 1, 1) - grid.addLayout(control_bar, 3, 0, 1, 1) - grid.setRowStretch(1, 1) - self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) - + control_bar.addStretch() + + return widget + + def _create_right_panel(self): + """Create right panel for smart intersection controls""" + panel = QScrollArea() + panel.setWidgetResizable(True) + panel.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) + panel.setStyleSheet(""" + QScrollArea { + background: #1a1a1a; + border: 1px solid #424242; + border-radius: 8px; + } + """) + + content = QWidget() + layout = QVBoxLayout(content) + layout.setContentsMargins(16, 16, 16, 16) + layout.setSpacing(16) + + # Smart Intersection Controls + intersection_group = 
QGroupBox("🚦 Smart Intersection") + intersection_group.setStyleSheet(""" + QGroupBox { + color: #03DAC5; + font-weight: bold; + font-size: 14px; + border: 2px solid #03DAC5; + border-radius: 8px; + margin-top: 12px; + padding-top: 8px; + } + QGroupBox::title { + subcontrol-origin: margin; + left: 16px; + padding: 0 8px 0 8px; + } + """) + + intersection_layout = QVBoxLayout(intersection_group) + + # Scene Analytics Toggle + self.scene_analytics_toggle = QCheckBox("Scene Analytics") + self.scene_analytics_toggle.setChecked(True) + self.scene_analytics_toggle.setStyleSheet("color: white; font-size: 12px;") + self.scene_analytics_toggle.toggled.connect(self.scene_analytics_toggled.emit) + intersection_layout.addWidget(self.scene_analytics_toggle) + + # Multi-object tracking + self.multi_tracking_toggle = QCheckBox("Multi-Object Tracking") + self.multi_tracking_toggle.setChecked(True) + self.multi_tracking_toggle.setStyleSheet("color: white; font-size: 12px;") + intersection_layout.addWidget(self.multi_tracking_toggle) + + # Speed estimation + self.speed_estimation_toggle = QCheckBox("Speed Estimation") + self.speed_estimation_toggle.setChecked(True) + self.speed_estimation_toggle.setStyleSheet("color: white; font-size: 12px;") + intersection_layout.addWidget(self.speed_estimation_toggle) + + layout.addWidget(intersection_group) + + # ROI Management + self.roi_widget = IntersectionROIWidget() + self.roi_widget.roi_updated.connect(self.roi_configuration_changed.emit) + layout.addWidget(self.roi_widget) + + # Analytics Summary - Simplified + analytics_group = QGroupBox("📊 Quick Stats") + analytics_group.setStyleSheet(intersection_group.styleSheet().replace("#03DAC5", "#e67e22")) + analytics_layout = QVBoxLayout(analytics_group) + + self.total_objects_label = QLabel("Total Objects: 0") + self.active_vehicles_label = QLabel("Active Vehicles: 0") + self.traffic_status_label = QLabel("Traffic Light: Unknown") + + for label in [self.total_objects_label, self.active_vehicles_label, self.traffic_status_label]: + label.setStyleSheet("color: white; font-size: 12px; padding: 4px;") + analytics_layout.addWidget(label) + + layout.addWidget(analytics_group) + + # Performance Monitoring + perf_group = QGroupBox("⚡ Performance") + perf_group.setStyleSheet(intersection_group.styleSheet().replace("#03DAC5", "#9b59b6")) + perf_layout = QVBoxLayout(perf_group) + + self.gpu_usage_label = QLabel("GPU Usage: -%") + self.memory_usage_label = QLabel("Memory: - MB") + self.processing_time_label = QLabel("Processing: - ms") + + for label in [self.gpu_usage_label, self.memory_usage_label, self.processing_time_label]: + label.setStyleSheet("color: white; font-size: 12px;") + perf_layout.addWidget(label) + + layout.addWidget(perf_group) + + layout.addStretch() + + panel.setWidget(content) + return panel + + def _toggle_smart_intersection(self, enabled): + """Toggle smart intersection mode""" + self.smart_intersection_mode = enabled + self.smart_intersection_enabled.emit(enabled) + + # Switch overlay + if enabled: + self._switch_to_smart_overlay() + self.mode_status.setText("🚦 Smart Intersection Active") + self.mode_status.setStyleSheet("color: #03DAC5; font-weight: bold; font-size: 12px;") + else: + self._switch_to_standard_overlay() + self.mode_status.setText("Standard Detection Mode") + self.mode_status.setStyleSheet("color: #bbb; font-size: 12px;") + + # Enable/disable multi-camera toggle + self.multi_camera_toggle.setEnabled(enabled) + if not enabled: + self.multi_camera_toggle.setChecked(False) + + def 
_toggle_multi_camera(self, enabled): + """Toggle multi-camera mode""" + self.multi_camera_mode = enabled + self.multi_camera_mode_enabled.emit(enabled) + + if enabled: + self.video_tabs.setCurrentIndex(1) # Switch to multi-camera tab + self.mode_status.setText("🚦 Multi-Camera Intersection Active") + else: + self.video_tabs.setCurrentIndex(0) # Switch to single camera tab + if self.smart_intersection_mode: + self.mode_status.setText("🚦 Smart Intersection Active") + + def _switch_to_smart_overlay(self): + """Switch to smart intersection overlay""" + self.badge_bar.removeWidget(self.current_overlay) + self.current_overlay.setParent(None) + self.current_overlay = self.smart_overlay + self.badge_bar.addWidget(self.current_overlay) + + def _switch_to_standard_overlay(self): + """Switch to standard overlay""" + self.badge_bar.removeWidget(self.current_overlay) + self.current_overlay.setParent(None) + self.current_overlay = self.standard_overlay + self.badge_bar.addWidget(self.current_overlay) + def _button_style(self): return """ QPushButton { @@ -190,7 +1028,7 @@ class VideoDetectionTab(QWidget): self.video_loaded = True self._enable_controls(True) self.video_label.setText("") - self.auto_select_model_device.emit() # Request auto model/device selection + self.auto_select_model_device.emit() def _enable_controls(self, enabled): self.play_btn.setEnabled(enabled) @@ -200,13 +1038,19 @@ class VideoDetectionTab(QWidget): self.detection_toggle.setEnabled(enabled) self.screenshot_btn.setEnabled(enabled) if enabled: - self.auto_select_model_device.emit() # Also trigger auto-select when controls are enabled + self.auto_select_model_device.emit() def update_display(self, pixmap): - # Maintain aspect ratio + """Update display with new frame""" if pixmap: - scaled = pixmap.scaled(self.video_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation) - self.video_label.setPixmap(scaled) + if self.multi_camera_mode: + # In multi-camera mode, distribute to different camera views + # For now, just update the single view + scaled = pixmap.scaled(self.video_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation) + self.video_label.setPixmap(scaled) + else: + scaled = pixmap.scaled(self.video_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation) + self.video_label.setPixmap(scaled) self._set_controls_enabled(True) self.video_label.setStyleSheet("background: transparent; color: #888; font-size: 18px;") else: @@ -220,35 +1064,108 @@ class VideoDetectionTab(QWidget): btn.setEnabled(enabled) def update_stats(self, stats): - # Accepts a stats dict for extensibility - cars = stats.get('cars', 0) - trucks = stats.get('trucks', 0) - peds = stats.get('peds', 0) - tlights = stats.get('tlights', 0) - motorcycles = stats.get('motorcycles', 0) + """Update statistics display""" + if self.smart_intersection_mode: + # Update smart intersection overlay + scene_data = { + 'active_tracks': stats.get('total_objects', 0), + 'roi_events': stats.get('roi_events', 0), + 'cameras': { + 'north': stats.get('north_objects', 0), + 'east': stats.get('east_objects', 0), + 'south': stats.get('south_objects', 0), + 'west': stats.get('west_objects', 0) + }, + 'analytics': { + 'crosswalk_events': stats.get('crosswalk_events', 0), + 'lane_events': stats.get('lane_events', 0), + 'safety_events': stats.get('safety_events', 0) + } + } + self.smart_overlay.update_smart_intersection(scene_data) + + # Update right panel quick stats + self.total_objects_label.setText(f"Total Objects: {stats.get('total_objects', 0)}") + active_vehicles = 
stats.get('cars', 0) + stats.get('trucks', 0) + stats.get('motorcycles', 0) + self.active_vehicles_label.setText(f"Active Vehicles: {active_vehicles}") + traffic_light = stats.get('traffic_light', {}) + if traffic_light and isinstance(traffic_light, dict): + color = traffic_light.get('color', 'Unknown') + self.traffic_status_label.setText(f"Traffic Light: {color.title()}") + else: + self.traffic_status_label.setText("Traffic Light: Unknown") + else: + # Update enhanced performance overlay + cars = stats.get('cars', 0) + trucks = stats.get('trucks', 0) + peds = stats.get('peds', 0) + tlights = stats.get('tlights', 0) + motorcycles = stats.get('motorcycles', 0) + model = stats.get('model', stats.get('model_name', '-')) + device = stats.get('device', stats.get('device_name', '-')) + + # Update vehicle counts and system info + self.standard_overlay.update_overlay(model, device, cars, trucks, peds, tlights, motorcycles) + + # Update performance metrics (FPS and inference time) + fps = stats.get('fps', None) + inference = stats.get('inference', stats.get('detection_time', stats.get('detection_time_ms', None))) + self.standard_overlay.update_performance_metrics(fps, inference) + + # Update traffic light status + traffic_light = stats.get('traffic_light', None) + self.standard_overlay.update_traffic_light_status(traffic_light) + + # Update right panel quick stats for standard mode too + total_objects = cars + trucks + peds + motorcycles + tlights + self.total_objects_label.setText(f"Total Objects: {total_objects}") + active_vehicles = cars + trucks + motorcycles + self.active_vehicles_label.setText(f"Active Vehicles: {active_vehicles}") + if traffic_light and isinstance(traffic_light, dict): + color = traffic_light.get('color', 'Unknown') + self.traffic_status_label.setText(f"Traffic Light: {color.title()}") + else: + self.traffic_status_label.setText("Traffic Light: Unknown") + + # Update performance badges (keeping the existing FPS and inference badges) fps = stats.get('fps', None) - # Try all possible keys for inference time inference = stats.get('inference', stats.get('detection_time', stats.get('detection_time_ms', None))) - model = stats.get('model', stats.get('model_name', '-')) - device = stats.get('device', stats.get('device_name', '-')) - # Update overlay - self.overlay.update_overlay(model, device, cars, trucks, peds, tlights, motorcycles) - # Update FPS and Inference badges + if fps is not None: self.fps_badge.setText(f"FPS: {fps:.2f}") else: self.fps_badge.setText("FPS: --") + if inference is not None: self.inference_badge.setText(f"Inference: {inference:.1f} ms") else: self.inference_badge.setText("Inference: -- ms") + + # Update performance panel (simplified to focus on essential metrics) + self.gpu_usage_label.setText(f"GPU Usage: {stats.get('gpu_usage', 0):.1f}%") + self.memory_usage_label.setText(f"Memory: {stats.get('memory_usage', 0):.1f} MB") + self.processing_time_label.setText(f"Processing: {stats.get('processing_time', inference if inference else 0):.1f} ms") def update_progress(self, value, max_value, timestamp): self.progress.setMaximum(max_value) self.progress.setValue(value) - # Format timestamp as string (e.g., "00:00 / 00:00" or just str) if isinstance(timestamp, float) or isinstance(timestamp, int): timestamp_str = f"{timestamp:.2f}" else: timestamp_str = str(timestamp) self.timestamp.setText(timestamp_str) + + def update_multi_camera_feed(self, camera_position, pixmap, object_count=0): + """Update specific camera feed in multi-camera mode""" + if 
self.multi_camera_mode: + self.multi_cam_widget.update_camera_feed(camera_position, pixmap, object_count) + + def get_smart_intersection_config(self): + """Get current smart intersection configuration""" + return { + 'enabled': self.smart_intersection_mode, + 'multi_camera': self.multi_camera_mode, + 'scene_analytics': self.scene_analytics_toggle.isChecked(), + 'multi_tracking': self.multi_tracking_toggle.isChecked(), + 'speed_estimation': self.speed_estimation_toggle.isChecked() + } diff --git a/qt_app_pyside1/utils/__pycache__/__init__.cpython-311.pyc b/qt_app_pyside1/utils/__pycache__/__init__.cpython-311.pyc index 547e3f02c2c536e93407a71bfab2f02dbfd7c15c..0486070a95703d0f80e8f4750ec3faa98299fdfe 100644 GIT binary patch delta 88 zcmaFF_?eM=IWI340}zC$u9(Oj?-=B46%$&VT2vg9l3JFWlV6$=ZigC#=&&$bAOeu~jEQwDnD2OkpEY3_xjmgeP NEJ@5o6`Z)*0{}bW8La>S diff --git a/qt_app_pyside1/utils/__pycache__/annotation_utils.cpython-311.pyc b/qt_app_pyside1/utils/__pycache__/annotation_utils.cpython-311.pyc index 4434a6b936fac8865d429eec0002c70f7de2d3a0..09d9c21f407a65bcc41f4d4fdc63f215182f61eb 100644 GIT binary patch delta 76 zcmZ1#dmxs3IWI340}zC$uGq*e#BCJiY!wq)oLW>IlagANoReRg662CuoL!P%5aX7a cmza|plbn;9n5UZt;^-EX7H4d>QyDFA@IWI340}v=mb8&}3b^k-JF3Hq6;7CbT%Us5mAiwJbR&zceMrCAB!aB)=dg rup}lqCp9roH!U+SF-Nzcv^XQCup~aQpdh}WvN$s()o}A#i96;16&WAz delta 76 zcmbQXkg;b8xcED5BX^O6fv1aAj7xrbUQT{uN^wkKNqk~KL3}}Fab`+t SOm;?MNn$Rl;O1Qtcgz9wV;r^s diff --git a/qt_app_pyside1/utils/__pycache__/enhanced_annotation_utils.cpython-311.pyc b/qt_app_pyside1/utils/__pycache__/enhanced_annotation_utils.cpython-311.pyc index df27d1d4ddef7a91bcabb56289e64acfdd132776..3ddb3cf9fbc2c7952f511dbdf5827f80c806189a 100644 GIT binary patch delta 76 zcmcau^1Ot5IWI340}zC$uGq+3$7+=9Y!wq)oLW>IlagANoReRg662CuoL!P%5aX7a cmza|plbn;9n5UZt;^-EX7H4c;%DTn`0PVLMssI20 delta 54 zcmaD}a)^I diff --git a/qt_app_pyside1/utils/__pycache__/helpers.cpython-311.pyc b/qt_app_pyside1/utils/__pycache__/helpers.cpython-311.pyc index 74bb0c34977ffd7de73829fd16bb621a202e0616..d916ae382014d4ae50e63c1aaf40f64b777670e7 100644 GIT binary patch delta 90 zcmbOpvLb|gIWI340}zC$uGq+Zl+`iN*(xTqIJKxaCMC5jIVZm~CB`MSIJ+djAjT~- qFEJ-ICOIcHF;6!Q#L+D%EzXE3EQwDnD2OkpEY3_xHQdA|t^xpQz8~rU delta 74 zcmZ1xGChQQIWI340}yP84&KOpl-0n&#VW=nzdSD|KQW~^rm!SFv7jKnpt3kKB{e2H QBe5hg7gcbxAe*=f0Hh5X3IG5A diff --git a/qt_app_pyside1/utils/__pycache__/traffic_light_utils.cpython-311.pyc b/qt_app_pyside1/utils/__pycache__/traffic_light_utils.cpython-311.pyc index 451cde3a110024507983598f4e4eacf15776155e..6ba4b8b5f99981c8de52c100cbdb5ffba1b981ba 100644 GIT binary patch delta 78 zcmaEUmGSRYM(*Xjyj%=G5Td$bBX>TFQLM97OlWaxQE^O4YFTnlerZaKOKNd;Nq#|$ eTV`HjPHIeYPHJMFZW@TATToh@v3VLxU>X22^BhY6 delta 56 zcmex+mGSviM(*Xjyj%=GP}fwwkvpG7#@)p##wEWzFDE}Sr8verBe5hg*C-|%OmAMp H5|{=6BghlU
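The stats dictionary that VideoDetectionTab.update_stats consumes ties the controller to this tab. A minimal sketch with made-up example values; the key names are collected from the method above (every key is optional, since update_stats reads them with dict.get defaults):

    # Minimal sketch (example values are made up): the stats payload read by
    # VideoDetectionTab.update_stats. Standard mode uses the per-class counts,
    # fps/inference and traffic_light; smart-intersection mode also reads the
    # per-camera and ROI counters; the right-hand panel reads the last block.
    example_stats = {
        # standard detection overlay
        "cars": 7, "trucks": 2, "peds": 3, "tlights": 1, "motorcycles": 1,
        "model": "yolo11x", "device": "GPU",
        "fps": 24.6, "inference": 17.3,   # 'detection_time' / 'detection_time_ms' also accepted
        "traffic_light": {"color": "red", "confidence": 0.91},
        # smart-intersection extras
        "total_objects": 14, "roi_events": 2,
        "north_objects": 4, "east_objects": 3, "south_objects": 5, "west_objects": 2,
        "crosswalk_events": 1, "lane_events": 1, "safety_events": 0,
        # right-hand performance panel
        "gpu_usage": 41.0, "memory_usage": 812.0, "processing_time": 17.3,
    }

    if __name__ == "__main__":
        active = sum(example_stats.get(k, 0) for k in ("cars", "trucks", "motorcycles"))
        print(f"Active Vehicles: {active}")  # 10
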