Clean push: Removed heavy files & added only latest snapshot
1
qt_app_pyside1/controllers/__init__.py
Normal file
@@ -0,0 +1 @@
# Controllers package for Traffic Monitoring System
BIN
qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc
Normal file
Binary file not shown.
341
qt_app_pyside1/controllers/analytics_controller.py
Normal file
@@ -0,0 +1,341 @@
from PySide6.QtCore import QObject, Signal, Slot
import numpy as np
from collections import defaultdict, deque, Counter
import time
from datetime import datetime, timedelta
from typing import Dict, List, Any


class AnalyticsController(QObject):
    """
    Controller for traffic analytics and statistics.

    Manages:
    - Vehicle counts by class
    - Violation statistics
    - Temporal analytics (traffic over time)
    - Speed statistics
    """
    analytics_updated = Signal(dict)  # Emitted when analytics are updated

    def __init__(self):
        """Initialize the analytics controller"""
        super().__init__()

        # Detection statistics
        self.detection_counts = defaultdict(int)
        self.detection_history = []

        # Violation statistics
        self.violation_counts = defaultdict(int)
        self.violation_history = []

        # Time series data (for charts)
        self.time_series = {
            'timestamps': [],
            'vehicle_counts': [],
            'pedestrian_counts': [],
            'violation_counts': []
        }

        # Performance metrics
        self.fps_history = deque(maxlen=100)
        self.processing_times = deque(maxlen=100)

        # Aggregated metrics
        self.aggregated_metrics = {
            'total_vehicles': 0,
            'total_pedestrians': 0,
            'total_violations': 0,
            'avg_processing_time': 0,
            'avg_fps': 0,
            'peak_vehicle_count': 0,
            'peak_violation_hour': None
        }

        # Initialize current time window
        self.current_window = datetime.now().replace(
            minute=0, second=0, microsecond=0
        )
        self.window_stats = defaultdict(int)

        # Traffic light analytics
        self.traffic_light_counts = defaultdict(int)  # Counts by color
        self.traffic_light_color_series = []  # List of (timestamp, color)
        self.traffic_light_color_numeric = []  # For charting: 0=unknown, 1=red, 2=yellow, 3=green
        self.traffic_light_color_map = {'unknown': 0, 'red': 1, 'yellow': 2, 'green': 3}

        self._last_update = time.time()

    @Slot(object, list, float)
    def process_frame_data(self, frame, detections, metrics):
        """
        Process frame data for analytics.

        Args:
            frame: Video frame
            detections: List of detections
            metrics: Dictionary containing metrics like 'detection_fps', or the fps value itself
        """
        try:
            # Empty violations list since violation detection is disabled
            violations = []

            # Debug info
            det_count = len(detections) if detections else 0
            print(f"Analytics processing: {det_count} detections")
        except Exception as e:
            print(f"Error in process_frame_data initialization: {e}")
            violations = []

        # Update FPS history - safely handle different metrics formats
        try:
            if isinstance(metrics, dict):
                fps = metrics.get('detection_fps', None)
                if isinstance(fps, (int, float)):
                    self.fps_history.append(fps)
            elif isinstance(metrics, (int, float)):
                # Handle case where metrics is directly the fps value
                self.fps_history.append(metrics)
            else:
                # Fallback if metrics is neither dict nor numeric
                print(f"Warning: Unexpected metrics type: {type(metrics)}")
        except Exception as e:
            print(f"Error processing metrics: {e}")
            # Add a default value to keep analytics running
            self.fps_history.append(0.0)

        # Process detections
        vehicle_count = 0
        pedestrian_count = 0

        # --- Traffic light analytics ---
        traffic_light_count = 0
        traffic_light_colors = []
        for det in detections:
            class_name = det.get('class_name', 'unknown').lower()
            self.detection_counts[class_name] += 1

            # Track vehicles vs pedestrians
            if class_name in ['car', 'truck', 'bus', 'motorcycle']:
                vehicle_count += 1
            elif class_name == 'person':
                pedestrian_count += 1
            if class_name in ['traffic light', 'trafficlight', 'tl', 'signal']:
                traffic_light_count += 1
                color = det.get('traffic_light_color', {}).get('color', 'unknown')
                self.traffic_light_counts[color] += 1
                traffic_light_colors.append(color)

        # Track most common color for this frame
        if traffic_light_colors:
            most_common_color = Counter(traffic_light_colors).most_common(1)[0][0]
        else:
            most_common_color = 'unknown'
        now_dt = datetime.now()
        self.traffic_light_color_series.append((now_dt.strftime('%H:%M:%S'), most_common_color))
        self.traffic_light_color_numeric.append(self.traffic_light_color_map.get(most_common_color, 0))
        # Keep last 60 points
        if len(self.traffic_light_color_series) > 60:
            self.traffic_light_color_series = self.traffic_light_color_series[-60:]
            self.traffic_light_color_numeric = self.traffic_light_color_numeric[-60:]

        # Update time series data (once per second)
        now = time.time()
        if now - self._last_update >= 1.0:
            self._update_time_series(vehicle_count, pedestrian_count, len(violations), most_common_color)
            self._last_update = now

        # Update aggregated metrics
        self._update_aggregated_metrics()

        # Emit updated analytics
        self.analytics_updated.emit(self.get_analytics())

    def _update_time_series(self, vehicle_count, pedestrian_count, violation_count, traffic_light_color=None):
        """Update time series data for charts"""
        now = datetime.now()

        # Check if we've moved to a new hour
        if now.hour != self.current_window.hour or now.day != self.current_window.day:
            # Save current window stats
            self._save_window_stats()

            # Reset for new window
            self.current_window = now.replace(minute=0, second=0, microsecond=0)
            self.window_stats = defaultdict(int)

        # Add current counts to window
        self.window_stats['vehicles'] += vehicle_count
        self.window_stats['pedestrians'] += pedestrian_count
        self.window_stats['violations'] += violation_count

        # Add to time series
        self.time_series['timestamps'].append(now.strftime('%H:%M:%S'))
        self.time_series['vehicle_counts'].append(vehicle_count)
        self.time_series['pedestrian_counts'].append(pedestrian_count)
        self.time_series['violation_counts'].append(violation_count)

        # Add traffic light color to time series
        if traffic_light_color is not None:
            if 'traffic_light_colors' not in self.time_series:
                self.time_series['traffic_light_colors'] = []
            self.time_series['traffic_light_colors'].append(traffic_light_color)
            if len(self.time_series['traffic_light_colors']) > 60:
                self.time_series['traffic_light_colors'] = self.time_series['traffic_light_colors'][-60:]

        # Keep last 60 data points (1 minute at 1 Hz)
        if len(self.time_series['timestamps']) > 60:
            for key in self.time_series:
                self.time_series[key] = self.time_series[key][-60:]

    def _save_window_stats(self):
        """Save stats for the current time window"""
        if sum(self.window_stats.values()) > 0:
            window_info = {
                'time': self.current_window,
                'vehicles': self.window_stats['vehicles'],
                'pedestrians': self.window_stats['pedestrians'],
                'violations': self.window_stats['violations']
            }

            # Update peak stats
            if window_info['vehicles'] > self.aggregated_metrics['peak_vehicle_count']:
                self.aggregated_metrics['peak_vehicle_count'] = window_info['vehicles']

            if window_info['violations'] > 0:
                if self.aggregated_metrics['peak_violation_hour'] is None or \
                   window_info['violations'] > self.aggregated_metrics['peak_violation_hour']['violations']:
                    self.aggregated_metrics['peak_violation_hour'] = {
                        'time': self.current_window.strftime('%H:%M'),
                        'violations': window_info['violations']
                    }

    def _update_aggregated_metrics(self):
        """Update aggregated analytics metrics"""
        # Count totals
        self.aggregated_metrics['total_vehicles'] = sum([
            self.detection_counts[c] for c in
            ['car', 'truck', 'bus', 'motorcycle']
        ])
        self.aggregated_metrics['total_pedestrians'] = self.detection_counts['person']
        self.aggregated_metrics['total_violations'] = sum(self.violation_counts.values())

        # Average FPS
        if self.fps_history:
            # Only average numeric values, skip anything else
            numeric_fps = [f for f in self.fps_history if isinstance(f, (int, float))]
            if numeric_fps:
                self.aggregated_metrics['avg_fps'] = sum(numeric_fps) / len(numeric_fps)
            else:
                self.aggregated_metrics['avg_fps'] = 0.0

        # Average processing time
        if self.processing_times:
            self.aggregated_metrics['avg_processing_time'] = sum(self.processing_times) / len(self.processing_times)

    def get_analytics(self) -> Dict:
        """
        Get current analytics data.

        Returns:
            Dictionary of analytics data
        """
        return {
            'detection_counts': dict(self.detection_counts),
            'violation_counts': dict(self.violation_counts),
            'time_series': self.time_series,
            'metrics': self.aggregated_metrics,
            'recent_violations': self.violation_history[-10:] if self.violation_history else [],
            'traffic_light_counts': dict(self.traffic_light_counts),
            'traffic_light_color_series': self.traffic_light_color_series,
            'traffic_light_color_numeric': self.traffic_light_color_numeric
        }

    def get_violation_history(self) -> List:
        """
        Get violation history.

        Returns:
            List of violation events
        """
        return self.violation_history.copy()

    def clear_statistics(self):
        """Reset all statistics"""
        self.detection_counts = defaultdict(int)
        self.violation_counts = defaultdict(int)
        self.detection_history = []
        self.violation_history = []
        self.time_series = {
            'timestamps': [],
            'vehicle_counts': [],
            'pedestrian_counts': [],
            'violation_counts': []
        }
        self.fps_history.clear()
        self.processing_times.clear()
        self.window_stats = defaultdict(int)
        self.aggregated_metrics = {
            'total_vehicles': 0,
            'total_pedestrians': 0,
            'total_violations': 0,
            'avg_processing_time': 0,
            'avg_fps': 0,
            'peak_vehicle_count': 0,
            'peak_violation_hour': None
        }

    def register_violation(self, violation):
        """
        Register a new violation in the analytics.

        Args:
            violation: Dictionary with violation information
        """
        try:
            # Add to violation counts - check both 'violation_type' and 'violation' keys
            violation_type = violation.get('violation_type') or violation.get('violation', 'unknown')
            self.violation_counts[violation_type] += 1

            # Add to violation history
            self.violation_history.append(violation)

            # Update time series (formatted the same way as the 1 Hz series,
            # which previously received a raw datetime here)
            now = datetime.now()
            self.time_series['timestamps'].append(now.strftime('%H:%M:%S'))

            # If we've been running for a while, drop old timestamps
            if len(self.time_series['timestamps']) > 100:  # Keep last 100 points
                self.time_series['timestamps'] = self.time_series['timestamps'][-100:]
                self.time_series['vehicle_counts'] = self.time_series['vehicle_counts'][-100:]
                self.time_series['pedestrian_counts'] = self.time_series['pedestrian_counts'][-100:]
                self.time_series['violation_counts'] = self.time_series['violation_counts'][-100:]

            # Append current totals to time series
            self.time_series['violation_counts'].append(sum(self.violation_counts.values()))

            # Make sure all time series have the same length
            while len(self.time_series['vehicle_counts']) < len(self.time_series['timestamps']):
                self.time_series['vehicle_counts'].append(sum(self.detection_counts.get(c, 0)
                                                              for c in ['car', 'truck', 'bus', 'motorcycle']))

            while len(self.time_series['pedestrian_counts']) < len(self.time_series['timestamps']):
                self.time_series['pedestrian_counts'].append(self.detection_counts.get('person', 0))

            # Update aggregated metrics
            self.aggregated_metrics['total_violations'] = sum(self.violation_counts.values())

            # Emit updated analytics
            self._emit_analytics_update()

            print(f"📊 Registered violation in analytics: {violation_type}")
        except Exception as e:
            print(f"❌ Error registering violation in analytics: {e}")
            import traceback
            traceback.print_exc()

    def _emit_analytics_update(self):
        """Emit analytics update signal with current data"""
        try:
            self.analytics_updated.emit(self.get_analytics())
        except Exception as e:
            print(f"❌ Error emitting analytics update: {e}")
            import traceback
            traceback.print_exc()
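
A minimal usage sketch (illustrative only, not part of this commit): the controller is fed per-frame data and consumed through its `analytics_updated` signal.

    from PySide6.QtWidgets import QApplication
    from qt_app_pyside1.controllers.analytics_controller import AnalyticsController

    app = QApplication([])
    analytics = AnalyticsController()

    # Refresh dashboards whenever new analytics arrive
    analytics.analytics_updated.connect(lambda data: print(data['metrics']))

    # Feed one frame's worth of data; metrics may be a dict or a bare fps value
    detections = [{'class_name': 'car', 'bbox': [10, 10, 50, 40], 'confidence': 0.9}]
    analytics.process_frame_data(None, detections, {'detection_fps': 25.0})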
1085
qt_app_pyside1/controllers/bytetrack_demo.py
Normal file
File diff suppressed because it is too large
550
qt_app_pyside1/controllers/bytetrack_tracker.py
Normal file
@@ -0,0 +1,550 @@
# ByteTrack implementation for vehicle tracking
# Efficient and robust multi-object tracking that works exactly like DeepSORT
import numpy as np
import cv2
import time
from collections import defaultdict, deque
from typing import List, Dict, Any, Tuple, Optional
import torch


class Track:
    """Track class for ByteTracker - compatible with video_controller_new.py"""

    def __init__(self, detection, track_id):
        """Initialize a track from a detection

        Args:
            detection: Detection array [x1, y1, x2, y2, score, class_id]
            track_id: Unique track ID
        """
        self.track_id = track_id
        self.tlbr = detection[:4]  # [x1, y1, x2, y2]
        self.score = detection[4] if len(detection) > 4 else 0.5
        self.class_id = int(detection[5]) if len(detection) > 5 else 0

        self.time_since_update = 0
        self.hits = 1  # Number of times track was matched to a detection
        self.age = 1
        self.frame_id = 0  # Will be set by the tracker during update
        self.is_lost = False  # Flag to indicate if track is lost
        self.state = 'Tentative'  # Track state: Tentative, Confirmed, Deleted

        # Store position history for movement tracking
        self.history = deque(maxlen=30)
        self.history.append(self.tlbr.copy())

        # Simple velocity estimation
        self.velocity = np.array([0., 0.])

    def predict(self):
        """Predict the next state using a simple constant-velocity motion model"""
        if len(self.history) >= 2:
            # Estimate velocity from the last two box centers
            curr_center = np.array([(self.tlbr[0] + self.tlbr[2]) / 2, (self.tlbr[1] + self.tlbr[3]) / 2])
            prev_tlbr = self.history[-2]
            prev_center = np.array([(prev_tlbr[0] + prev_tlbr[2]) / 2, (prev_tlbr[1] + prev_tlbr[3]) / 2])
            self.velocity = curr_center - prev_center

            # Predict next position
            next_center = curr_center + self.velocity
            w, h = self.tlbr[2] - self.tlbr[0], self.tlbr[3] - self.tlbr[1]
            self.tlbr = np.array([next_center[0] - w / 2, next_center[1] - h / 2,
                                  next_center[0] + w / 2, next_center[1] + h / 2])

        self.age += 1
        self.time_since_update += 1

    def update(self, detection):
        """Update track with a new detection"""
        self.tlbr = detection[:4]
        self.score = detection[4] if len(detection) > 4 else self.score
        self.class_id = int(detection[5]) if len(detection) > 5 else self.class_id

        self.hits += 1
        self.time_since_update = 0
        self.history.append(self.tlbr.copy())

        # Promote to confirmed after enough hits
        if self.state == 'Tentative' and self.hits >= 3:
            self.state = 'Confirmed'

    def mark_missed(self):
        """Mark track as missed (no detection matched)"""
        self.time_since_update += 1
        if self.time_since_update > 1:
            self.is_lost = True

    def is_confirmed(self):
        """Check if track is confirmed (has enough hits)"""
        return self.state == 'Confirmed'

    def to_dict(self):
        """Convert track to dictionary format for video_controller_new.py"""
        return {
            'id': self.track_id,
            'bbox': [float(self.tlbr[0]), float(self.tlbr[1]), float(self.tlbr[2]), float(self.tlbr[3])],
            'confidence': float(self.score),
            'class_id': int(self.class_id)
        }


class BYTETracker:
    """
    ByteTrack tracker implementation.
    Designed to work exactly like DeepSORT with video_controller_new.py
    """
    def __init__(
        self,
        track_thresh=0.5,
        track_buffer=30,
        match_thresh=0.7,
        frame_rate=30,
        track_high_thresh=0.6,
        track_low_thresh=0.1
    ):
        self.tracked_tracks = []   # Active tracks being tracked
        self.lost_tracks = []      # Lost tracks (temporarily out of view)
        self.removed_tracks = []   # Removed tracks (permanently lost)

        self.frame_id = 0
        self.max_time_lost = int(frame_rate / 30.0 * track_buffer)

        self.track_thresh = track_thresh            # Threshold for creating new tracks
        self.track_high_thresh = track_high_thresh  # Higher threshold for first association
        self.track_low_thresh = track_low_thresh    # Lower threshold for second association
        self.match_thresh = match_thresh            # IoU match threshold

        self.track_id_count = 0

        print(f"[BYTETRACK] Initialized with: high_thresh={track_high_thresh}, " +
              f"low_thresh={track_low_thresh}, match_thresh={match_thresh}, max_time_lost={self.max_time_lost}")

    def update(self, detections, frame=None):
        """Update tracks with new detections

        Args:
            detections: list of dicts with keys ['bbox', 'confidence', 'class_id', ...]
            frame: Optional BGR frame for debug visualization

        Returns:
            list of dicts with keys ['id', 'bbox', 'confidence', 'class_id', ...]
        """
        self.frame_id += 1

        # Convert detections to internal format
        converted_detections = self._convert_detections(detections)

        print(f"[BYTETRACK] Frame {self.frame_id}: Processing {len(converted_detections)} detections")
        print(f"[BYTETRACK] Current state: {len(self.tracked_tracks)} tracked, {len(self.lost_tracks)} lost")

        # Handle empty detections case
        if len(converted_detections) == 0:
            print(f"[BYTETRACK] No valid detections in frame {self.frame_id}")
            # Move all tracked to lost and update
            for track in self.tracked_tracks:
                track.mark_missed()
                track.predict()
                if track.time_since_update <= self.max_time_lost:
                    self.lost_tracks.append(track)
                else:
                    self.removed_tracks.append(track)

            # Update lost tracks
            updated_lost = []
            for track in self.lost_tracks:
                track.predict()
                if track.time_since_update <= self.max_time_lost:
                    updated_lost.append(track)
                else:
                    self.removed_tracks.append(track)

            self.tracked_tracks = []
            self.lost_tracks = updated_lost
            return []

        # Split detections into high and low confidence
        confidence_values = converted_detections[:, 4].astype(float)
        high_indices = confidence_values >= self.track_high_thresh
        low_indices = (confidence_values >= self.track_low_thresh) & (confidence_values < self.track_high_thresh)

        high_detections = converted_detections[high_indices]
        low_detections = converted_detections[low_indices]

        print(f"[BYTETRACK] Split into {len(high_detections)} high-conf and {len(low_detections)} low-conf detections")

        # Predict all tracks
        for track in self.tracked_tracks + self.lost_tracks:
            track.predict()

        # First association: high-confidence detections with tracked tracks
        matches1, unmatched_tracks1, unmatched_dets1 = self._associate(
            high_detections, self.tracked_tracks, self.match_thresh)

        # Update matched tracks
        for match in matches1:
            track_idx, det_idx = match
            self.tracked_tracks[track_idx].update(high_detections[det_idx])
            self.tracked_tracks[track_idx].frame_id = self.frame_id

        # Move unmatched tracks to lost
        for idx in unmatched_tracks1:
            track = self.tracked_tracks[idx]
            track.mark_missed()
            if track.time_since_update <= self.max_time_lost:
                self.lost_tracks.append(track)
            else:
                self.removed_tracks.append(track)

        # Keep only matched tracks
        self.tracked_tracks = [self.tracked_tracks[i] for i in range(len(self.tracked_tracks)) if i not in unmatched_tracks1]

        # Second association: remaining high-conf detections with lost tracks
        if len(unmatched_dets1) > 0 and len(self.lost_tracks) > 0:
            remaining_high_dets = high_detections[unmatched_dets1]
            matches2, unmatched_tracks2, unmatched_dets2 = self._associate(
                remaining_high_dets, self.lost_tracks, self.match_thresh)

            # Reactivate matched lost tracks
            for match in matches2:
                track_idx, det_idx = match
                track = self.lost_tracks[track_idx]
                track.update(remaining_high_dets[det_idx])
                track.frame_id = self.frame_id
                track.is_lost = False
                self.tracked_tracks.append(track)

            # Remove reactivated tracks from lost
            self.lost_tracks = [self.lost_tracks[i] for i in range(len(self.lost_tracks)) if i not in [m[0] for m in matches2]]

            # Map unmatched-detection indices back to the original high-detection indices
            final_unmatched_dets = [unmatched_dets1[i] for i in unmatched_dets2]
        else:
            final_unmatched_dets = unmatched_dets1

        # Third association: low-confidence detections with remaining lost tracks
        if len(low_detections) > 0 and len(self.lost_tracks) > 0:
            matches3, unmatched_tracks3, unmatched_dets3 = self._associate(
                low_detections, self.lost_tracks, self.match_thresh)

            # Reactivate matched lost tracks
            for match in matches3:
                track_idx, det_idx = match
                track = self.lost_tracks[track_idx]
                track.update(low_detections[det_idx])
                track.frame_id = self.frame_id
                track.is_lost = False
                self.tracked_tracks.append(track)

            # Remove reactivated tracks from lost
            self.lost_tracks = [self.lost_tracks[i] for i in range(len(self.lost_tracks)) if i not in [m[0] for m in matches3]]

        # Create new tracks for remaining unmatched high-confidence detections
        new_tracks_created = 0
        for det_idx in final_unmatched_dets:
            detection = high_detections[det_idx]
            if detection[4] >= self.track_thresh:  # Only create tracks for high-confidence detections
                self.track_id_count += 1
                new_track = Track(detection, self.track_id_count)
                new_track.frame_id = self.frame_id
                self.tracked_tracks.append(new_track)
                new_tracks_created += 1

        # Clean up lost tracks that have been lost too long
        updated_lost = []
        removed_count = 0
        for track in self.lost_tracks:
            if track.time_since_update <= self.max_time_lost:
                updated_lost.append(track)
            else:
                self.removed_tracks.append(track)
                removed_count += 1
        self.lost_tracks = updated_lost

        print(f"[BYTETRACK] Matched {len(matches1)} tracks, created {new_tracks_created} new tracks, removed {removed_count} expired tracks")
        print(f"[BYTETRACK] Final state: {len(self.tracked_tracks)} tracked, {len(self.lost_tracks)} lost")

        # Return confirmed tracks in dictionary format
        confirmed_tracks = []
        for track in self.tracked_tracks:
            if track.is_confirmed():
                confirmed_tracks.append(track.to_dict())

        print(f"[BYTETRACK] Returning {len(confirmed_tracks)} confirmed tracks")
        return confirmed_tracks

    def _convert_detections(self, detections):
        """Convert detection dicts to a numpy array [x1, y1, x2, y2, conf, class_id]"""
        if len(detections) == 0:
            return np.empty((0, 6))

        converted = []
        for det in detections:
            bbox = det.get('bbox', [0, 0, 0, 0])
            conf = det.get('confidence', 0.0)
            class_id = det.get('class_id', 0)

            # Ensure bbox is valid
            if len(bbox) == 4 and bbox[2] > bbox[0] and bbox[3] > bbox[1]:
                converted.append([float(bbox[0]), float(bbox[1]), float(bbox[2]), float(bbox[3]), float(conf), int(class_id)])

        return np.array(converted) if converted else np.empty((0, 6))

    def _associate(self, detections, tracks, iou_threshold):
        """Associate detections with tracks using IoU"""
        if len(detections) == 0 or len(tracks) == 0:
            return [], list(range(len(tracks))), list(range(len(detections)))

        # Calculate IoU matrix
        iou_matrix = self._calculate_iou_matrix(detections[:, :4], np.array([track.tlbr for track in tracks]))

        # Use a simplified greedy approach in place of the Hungarian algorithm
        matches, unmatched_tracks, unmatched_detections = self._linear_assignment(iou_matrix, iou_threshold)

        return matches, unmatched_tracks, unmatched_detections

    def _calculate_iou_matrix(self, detections, tracks):
        """Calculate IoU matrix between detections and tracks"""
        if len(detections) == 0 or len(tracks) == 0:
            return np.empty((0, 0))

        # Calculate areas
        det_areas = (detections[:, 2] - detections[:, 0]) * (detections[:, 3] - detections[:, 1])
        track_areas = (tracks[:, 2] - tracks[:, 0]) * (tracks[:, 3] - tracks[:, 1])

        # Calculate intersections
        ious = np.zeros((len(detections), len(tracks)))
        for i, det in enumerate(detections):
            for j, track in enumerate(tracks):
                # Intersection coordinates
                x1 = max(det[0], track[0])
                y1 = max(det[1], track[1])
                x2 = min(det[2], track[2])
                y2 = min(det[3], track[3])

                if x2 > x1 and y2 > y1:
                    intersection = (x2 - x1) * (y2 - y1)
                    union = det_areas[i] + track_areas[j] - intersection
                    ious[i, j] = intersection / union if union > 0 else 0
                else:
                    ious[i, j] = 0

        return ious
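
    def _calculate_iou_matrix_vectorized(self, detections, tracks):
        """Illustrative vectorized equivalent of the nested loop above (not part
        of the original commit): computes the same IoU matrix with numpy
        broadcasting, which scales much better for dense scenes."""
        if len(detections) == 0 or len(tracks) == 0:
            return np.empty((0, 0))

        # Pairwise intersection coordinates via broadcasting: (N, 1) against (M,)
        x1 = np.maximum(detections[:, None, 0], tracks[None, :, 0])
        y1 = np.maximum(detections[:, None, 1], tracks[None, :, 1])
        x2 = np.minimum(detections[:, None, 2], tracks[None, :, 2])
        y2 = np.minimum(detections[:, None, 3], tracks[None, :, 3])

        inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
        det_areas = (detections[:, 2] - detections[:, 0]) * (detections[:, 3] - detections[:, 1])
        track_areas = (tracks[:, 2] - tracks[:, 0]) * (tracks[:, 3] - tracks[:, 1])
        union = det_areas[:, None] + track_areas[None, :] - inter
        # Avoid division by zero for degenerate boxes
        return np.divide(inter, union, out=np.zeros_like(inter, dtype=float), where=union > 0)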

    def _linear_assignment(self, cost_matrix, threshold):
        """Simple greedy assignment based on IoU threshold"""
        matches = []
        unmatched_tracks = list(range(cost_matrix.shape[1]))
        unmatched_detections = list(range(cost_matrix.shape[0]))

        if cost_matrix.size == 0:
            return matches, unmatched_tracks, unmatched_detections

        # Find matches above threshold (first acceptable track wins for each detection)
        for i in range(cost_matrix.shape[0]):
            for j in range(cost_matrix.shape[1]):
                if cost_matrix[i, j] >= threshold:
                    if i in unmatched_detections and j in unmatched_tracks:
                        matches.append([j, i])  # [track_idx, det_idx]
                        unmatched_tracks.remove(j)
                        unmatched_detections.remove(i)
                        break

        return matches, unmatched_tracks, unmatched_detections
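
    def _hungarian_assignment(self, cost_matrix, threshold):
        """Illustrative optimal alternative to the greedy matcher above (not part
        of the original commit): uses SciPy's Hungarian solver to maximize total
        IoU, then drops pairs below the threshold. Assumes scipy is installed."""
        from scipy.optimize import linear_sum_assignment

        matches = []
        unmatched_tracks = list(range(cost_matrix.shape[1]))
        unmatched_detections = list(range(cost_matrix.shape[0]))

        if cost_matrix.size == 0:
            return matches, unmatched_tracks, unmatched_detections

        # maximize=True treats the IoU matrix as a reward rather than a cost
        det_idx, trk_idx = linear_sum_assignment(cost_matrix, maximize=True)
        for i, j in zip(det_idx, trk_idx):
            if cost_matrix[i, j] >= threshold:
                matches.append([j, i])  # [track_idx, det_idx], same convention as above
                unmatched_tracks.remove(j)
                unmatched_detections.remove(i)

        return matches, unmatched_tracks, unmatched_detections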


class ByteTrackVehicleTracker:
    """
    ByteTrack-based vehicle tracker with the exact same API as DeepSortVehicleTracker,
    for drop-in replacement in video_controller_new.py
    """
    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            print("[BYTETRACK SINGLETON] Creating ByteTrackVehicleTracker instance")
            cls._instance = super(ByteTrackVehicleTracker, cls).__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if getattr(self, '_initialized', False):
            return
        print("[BYTETRACK INIT] Initializing ByteTrack tracker")

        # Parameters optimized for vehicle tracking in traffic scenes
        self.tracker = BYTETracker(
            track_thresh=0.4,       # Minimum confidence to create new tracks
            track_buffer=30,        # How many frames to keep lost tracks
            match_thresh=0.7,       # IoU threshold for matching
            track_high_thresh=0.5,  # High confidence threshold for first association
            track_low_thresh=0.2,   # Low confidence threshold for second association
            frame_rate=30           # Expected frame rate
        )

        self._initialized = True
        self.debug = True  # Enable debug output

        # Memory management
        self.max_removed_tracks = 100  # Limit removed tracks to prevent memory issues

    def update(self, detections, frame=None):
        """
        Update tracker with new detections - EXACT API as DeepSORT

        Args:
            detections: list of dicts with keys ['bbox', 'confidence', 'class_id', ...]
            frame: BGR image (optional)

        Returns:
            list of dicts with keys ['id', 'bbox', 'confidence', 'class_id', ...]
        """
        try:
            # Input validation
            if not isinstance(detections, list):
                print(f"[BYTETRACK ERROR] Invalid detections format: {type(detections)}")
                return []

            # Process detections
            valid_dets = []
            for det in detections:
                if not isinstance(det, dict):
                    continue

                bbox = det.get('bbox')
                conf = det.get('confidence', 0.0)
                class_id = det.get('class_id', 0)

                if bbox is not None and len(bbox) == 4:
                    x1, y1, x2, y2 = map(float, bbox)
                    conf = float(conf)
                    class_id = int(class_id)

                    # Validate bbox dimensions
                    if x2 > x1 and y2 > y1 and conf > 0.1:
                        valid_dets.append({
                            'bbox': [x1, y1, x2, y2],
                            'confidence': conf,
                            'class_id': class_id
                        })

            if self.debug:
                print(f"[BYTETRACK] Processing {len(valid_dets)} valid detections")

            # Update tracker
            tracks = self.tracker.update(valid_dets, frame)

            # Memory management - limit removed tracks
            if len(self.tracker.removed_tracks) > self.max_removed_tracks:
                self.tracker.removed_tracks = self.tracker.removed_tracks[-self.max_removed_tracks // 2:]
                if self.debug:
                    print(f"[BYTETRACK] Cleaned up removed tracks, keeping last {len(self.tracker.removed_tracks)}")

            return tracks

        except Exception as e:
            print(f"[BYTETRACK ERROR] Error updating tracker: {e}")
            import traceback
            traceback.print_exc()
            return []

    def update_tracks(self, detections, frame=None):
        """
        Update method for compatibility with the DeepSORT interface used by model_manager.py

        Args:
            detections: list of detection arrays in format [bbox_xywh, conf, class_name]
            frame: BGR image (optional)

        Returns:
            list of track objects with a DeepSORT-compatible interface, including an is_confirmed() method
        """
        if self.debug:
            print(f"[BYTETRACK] update_tracks called with {len(detections)} detections")

        # Convert from DeepSORT format to ByteTrack format
        converted_dets = []

        for det in detections:
            try:
                # Handle different detection formats
                if isinstance(det, (list, tuple)) and len(det) >= 2:
                    # DeepSORT format: [bbox_xywh, conf, class_name]
                    bbox_xywh, conf = det[:2]
                    class_name = det[2] if len(det) > 2 else 'vehicle'

                    # Convert [x, y, w, h] to [x1, y1, x2, y2] with type validation
                    if isinstance(bbox_xywh, (list, tuple, np.ndarray)) and len(bbox_xywh) == 4:
                        x, y, w, h = map(float, bbox_xywh)
                        conf = float(conf)

                        converted_dets.append({
                            'bbox': [x, y, x + w, y + h],
                            'confidence': conf,
                            'class_id': 0  # Default vehicle class
                        })
                    else:
                        if self.debug:
                            print(f"[BYTETRACK] Skipping invalid detection format: {det}")
            except Exception as e:
                if self.debug:
                    print(f"[BYTETRACK] Error converting detection: {e}")

        # Call the regular update method to get dictionary tracks
        dict_tracks = self.update(converted_dets, frame)

        if self.debug:
            print(f"[BYTETRACK] Converting {len(dict_tracks)} dict tracks to DeepSORT-compatible objects")

        # Create DeepSORT-compatible track objects from dictionaries
        ds_tracks = []
        for track_data in dict_tracks:
            ds_track = ByteTrackOutput(track_data)
            ds_tracks.append(ds_track)

        return ds_tracks

    def reset(self):
        """
        Reset the tracker to a clean state - starts track IDs from 1.
        Call this when starting a new video or session.
        """
        print("[BYTETRACK] Resetting tracker state")
        if hasattr(self, 'tracker') and self.tracker is not None:
            # Reset the internal BYTETracker
            self.tracker.tracked_tracks = []
            self.tracker.lost_tracks = []
            self.tracker.removed_tracks = []
            self.tracker.frame_id = 0
            self.tracker.track_id_count = 0  # Reset ID counter to start from 1

            print("[BYTETRACK] Reset complete - track IDs will start from 1")
        else:
            print("[BYTETRACK] Warning: Tracker not initialized, nothing to reset")


class ByteTrackOutput:
    """
    Adapter class to make ByteTrack output compatible with the DeepSORT interface
    """

    def __init__(self, track_data):
        """Initialize from ByteTrack track dictionary"""
        self.track_id = track_data.get('id', -1)
        self.det_index = track_data.get('det_index', -1)
        bbox = track_data.get('bbox', [0, 0, 0, 0])  # [x1, y1, x2, y2]
        self.bbox = bbox
        # Pre-compute the tlwh form so to_tlwh() matches the DeepSORT convention
        # (the original stored the xyxy box here, mislabeled as [x, y, w, h])
        self.to_tlwh_ret = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
        self.confidence = track_data.get('confidence', 0.0)
        # Stored as a private flag so is_confirmed stays callable, as DeepSORT
        # callers expect (the original shadowed the method with a plain bool)
        self._confirmed = track_data.get('is_confirmed', True)
        # Store the original track data
        self._track_data = track_data

    def is_confirmed(self):
        """Check if track is confirmed (DeepSORT-compatible method)"""
        return self._confirmed

    def to_tlwh(self):
        """Return bounding box in [x, y, w, h] format"""
        return self.to_tlwh_ret

    def __getattr__(self, name):
        """Fallback to original track data"""
        if name in self._track_data:
            return self._track_data[name]
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
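
A minimal usage sketch (illustrative only, not part of this commit); a track needs three hits before it is confirmed and returned:

    from qt_app_pyside1.controllers.bytetrack_tracker import ByteTrackVehicleTracker

    tracker = ByteTrackVehicleTracker()
    tracker.reset()  # start track IDs from 1 for a new video

    detections = [{'bbox': [100.0, 120.0, 180.0, 200.0], 'confidence': 0.85, 'class_id': 2}]
    for _ in range(3):  # feed three frames so the track gets confirmed
        tracks = tracker.update(detections)
    print(tracks)  # -> [{'id': 1, 'bbox': [...], 'confidence': 0.85, 'class_id': 2}]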
103
qt_app_pyside1/controllers/deepsort_tracker.py
Normal file
@@ -0,0 +1,103 @@
# DeepSORT integration for vehicle tracking
# You need to install deep_sort_realtime: pip install deep_sort_realtime
from deep_sort_realtime.deepsort_tracker import DeepSort


class DeepSortVehicleTracker:
    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            print("[DEEPSORT SINGLETON] Creating DeepSortVehicleTracker instance")
            cls._instance = super(DeepSortVehicleTracker, cls).__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if getattr(self, '_initialized', False):
            return
        print("[DEEPSORT INIT] Initializing DeepSort tracker (should only see this once)")
        # Use DeepSORT with better parameters to reduce duplicate IDs
        self.tracker = DeepSort(
            max_age=50,                  # Keep tracks longer to avoid re-creating IDs
            n_init=3,                    # Require 3 consecutive detections before confirming track
            nms_max_overlap=0.3,         # Stricter NMS to avoid duplicate detections
            max_cosine_distance=0.4,     # Stricter appearance matching
            nn_budget=100,               # Budget for appearance features
            gating_only_position=False   # Use both position and appearance for gating
        )
        self._initialized = True
        self.track_id_counter = {}  # Track seen IDs to detect duplicates

    def update(self, detections, frame=None):
        """Update the tracker.

        Args:
            detections: list of dicts with keys ['bbox', 'confidence', 'class_id', ...]
            frame: BGR image (optional, for appearance embedding)

        Returns:
            list of dicts with keys ['id', 'bbox', 'confidence', 'class_id', ...]
        """
        # Convert detections to DeepSORT format with validation
        ds_detections = []
        for i, det in enumerate(detections):
            bbox = det.get('bbox')
            conf = det.get('confidence', 0.0)
            class_id = det.get('class_id', -1)

            if bbox is not None and len(bbox) == 4:
                x1, y1, x2, y2 = bbox
                # Validate bbox dimensions
                if x2 > x1 and y2 > y1 and conf > 0.3:  # Higher confidence threshold
                    # Convert to [x1, y1, width, height] format expected by DeepSORT
                    bbox_xywh = [x1, y1, x2 - x1, y2 - y1]
                    ds_detections.append([bbox_xywh, conf, class_id])
                    print(f"[DEEPSORT] Added detection {i}: bbox={bbox_xywh}, conf={conf:.2f}")
                else:
                    print(f"[DEEPSORT] Rejected detection {i}: invalid bbox or low confidence")
            else:
                print(f"[DEEPSORT] Rejected detection {i}: invalid bbox format")

        print(f"[DEEPSORT] Processing {len(ds_detections)} valid detections")

        # Update tracker with frame for appearance features
        if frame is not None:
            tracks = self.tracker.update_tracks(ds_detections, frame=frame)
        else:
            tracks = self.tracker.update_tracks(ds_detections)

        # Process results and check for duplicate IDs
        results = []
        current_ids = []

        for track in tracks:
            if not track.is_confirmed():
                continue

            track_id = track.track_id
            ltrb = track.to_ltrb()
            conf = track.det_conf if hasattr(track, 'det_conf') else 0.0
            class_id = track.det_class if hasattr(track, 'det_class') else -1

            # Check for duplicate IDs
            if track_id in current_ids:
                print(f"[DEEPSORT ERROR] DUPLICATE ID DETECTED: {track_id}")
                continue  # Skip this duplicate

            current_ids.append(track_id)

            # Convert back to [x1, y1, x2, y2] format
            x1, y1, x2, y2 = ltrb
            bbox_xyxy = [x1, y1, x2, y2]

            results.append({
                'id': track_id,
                'bbox': bbox_xyxy,
                'confidence': conf,
                'class_id': class_id
            })

            conf_str = f"{conf:.2f}" if conf is not None else "None"
            print(f"[DEEPSORT] Track ID={track_id}: bbox={bbox_xyxy}, conf={conf_str}")

        # Update ID counter for statistics
        for track_id in current_ids:
            self.track_id_counter[track_id] = self.track_id_counter.get(track_id, 0) + 1

        print(f"[DEEPSORT] Returning {len(results)} confirmed tracks")
        return results
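
Because of the `__new__` singleton above, every call site shares one tracker instance; a short sketch (illustrative only):

    a = DeepSortVehicleTracker()
    b = DeepSortVehicleTracker()
    assert a is b                  # the same object is returned
    assert a.tracker is b.tracker  # __init__ ran only once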
173
qt_app_pyside1/controllers/difference.py
Normal file
@@ -0,0 +1,173 @@
# Detailed Comparison: video_controller_new.py vs video_controller_finale.py
#
# This document provides a function-by-function, block-by-block comparison between `video_controller_new.py` and `video_controller_finale.py` as of July 2025. It highlights what is present, missing, or different in each file, and explains the practical impact of those differences for real-world red light violation detection and video analytics.
#
# ---
#
# ## Table of Contents
# - [Overall Structure](#overall-structure)
# - [Class/Function Inventory](#classfunction-inventory)
# - [Function-by-Function Comparison](#function-by-function-comparison)
#   - [__init__](#__init__)
#   - [set_source](#set_source)
#   - [_get_source_properties](#_get_source_properties)
#   - [start/stop](#startstop)
#   - [_run](#_run)
#   - [_process_frame](#_process_frame)
#   - [detect_red_light_violations](#detect_red_light_violations)
# - [Key Differences and Impact](#key-differences-and-impact)
# - [Summary Table](#summary-table)
#
# ---
#
# ## Overall Structure
#
# - **video_controller_new.py**
#   - Modernized, modular, and debug-heavy.
#   - Uses enhanced annotation utilities, more robust fallback logic, and detailed debug output.
#   - Violation detection logic is inlined and self-contained.
#   - State machine for per-vehicle violation tracking is explicit and debugged.
#   - Crosswalk/violation line detection is always run, with fallback.
#   - Always emits overlays and signals, even if there are no violators.
#
# - **video_controller_finale.py**
#   - Reference implementation, known to work reliably in production.
#   - May use some different utility imports and slightly different state handling.
#   - Violation detection logic may be more tightly coupled to the tracker or an external detector class.
#   - Debug output is present but may be less granular.
#   - Fallbacks for violation line and traffic light are robust.
#
# ---
#
# ## Class/Function Inventory
#
# | Function/Class                | In New | In Finale | Notes |
# |-------------------------------|--------|-----------|-------|
# | VideoController               | ✔      | ✔         | Main class in both |
# | __init__                      | ✔      | ✔         | New: more debug, explicit tracker, fallback logic |
# | set_source                    | ✔      | ✔         | Similar, new has more robust type handling |
# | _get_source_properties        | ✔      | ✔         | Similar, new has more debug |
# | start/stop                    | ✔      | ✔         | Similar, new has more debug |
# | _run                          | ✔      | ✔         | New: more debug, more robust detection/tracking |
# | _process_frame                | ✔      | ✔         | New: always runs crosswalk, overlays, fallback |
# | detect_red_light_violations   | ✔      | ✔         | New: inlined, explicit state machine, more debug |
# | violation_detector (external) | ✖      | ✔         | Finale may use RedLightViolationDetector class |
#
# ---
#
# ## Function-by-Function Comparison
#
# ### __init__
# - **New:**
#   - Sets up all state, tracker, and debug counters.
#   - Imports and initializes crosswalk detection utilities with try/except.
#   - Does NOT use external `RedLightViolationDetector` (commented out).
#   - Uses inlined `detect_red_light_violations` method.
# - **Finale:**
#   - May use external `RedLightViolationDetector` class for violation logic.
#   - Similar state setup, but possibly less debug output.
#
# ### set_source
# - **New:**
#   - Handles all source types robustly (file, camera, URL, device).
#   - More debug output for every branch.
# - **Finale:**
#   - Similar logic, possibly less robust for edge cases.
#
# ### _get_source_properties
# - **New:**
#   - More debug output, retries for camera sources.
# - **Finale:**
#   - Similar, but may not retry as aggressively.
#
# ### start/stop
# - **New:**
#   - More debug output, aggressive render timer (10ms).
# - **Finale:**
#   - Standard start/stop, less debug.
#
# ### _run
# - **New:**
#   - Handles detection, tracking, and annotation in one loop.
#   - Always normalizes class names.
#   - Always draws overlays and emits signals.
#   - More debug output for every step.
# - **Finale:**
#   - Similar, but may use external violation detector.
#   - May not always emit overlays if there are no detections.
#
# ### _process_frame
# - **New:**
#   - Always runs crosswalk/violation line detection.
#   - Always overlays violation line and traffic light status.
#   - Only runs violation detection if both red light and violation line are present.
#   - Always emits overlays/signals, even if there are no violators.
#   - More robust fallback for violation line.
#   - More debug output for every step.
# - **Finale:**
#   - Similar, but may skip overlays if there are no detections.
#   - May use external violation detector.
#
# ### detect_red_light_violations
# - **New:**
#   - Inlined, explicit state machine for per-vehicle tracking.
#   - Requires the vehicle to be behind the line before crossing during red.
#   - Cooldown logic to prevent duplicate violations (a schematic sketch of this state machine follows this file).
#   - Extensive debug output for every vehicle, every frame.
# - **Finale:**
#   - May use an external class for violation logic.
#   - Similar state machine, but less debug output.
#   - May have slightly different fallback/cooldown logic.
#
# ---
#
# ## Key Differences and Impact
#
# - **External Violation Detector:**
#   - Finale uses the `RedLightViolationDetector` class; New inlines the logic.
#   - Impact: New is easier to debug and modify, but harder to swap out logic.
#
# - **Debug Output:**
#   - New has much more granular debug output for every step and every vehicle.
#   - Impact: Easier to diagnose issues in New.
#
# - **Fallback Logic:**
#   - Both have robust fallback for violation line and traffic light, but New is more explicit.
#
# - **Overlay/Signal Emission:**
#   - New always emits overlays and signals, even if there are no violators.
#   - Finale may skip them if there are no detections.
#
# - **State Machine:**
#   - New's state machine is explicit, per-vehicle, and debugged.
#   - Finale's may be more implicit or handled in an external class.
#
# - **Modularity:**
#   - Finale is more modular (external detector class); New is more monolithic but easier to trace.
#
# ---
#
# ## Summary Table
#
# | Feature/Function                | video_controller_new.py | video_controller_finale.py |
# |---------------------------------|:-----------------------:|:--------------------------:|
# | External Violation Detector     | ✖                       | ✔                          |
# | Inlined Violation Logic         | ✔                       | ✖                          |
# | Robust Fallbacks                | ✔                       | ✔                          |
# | Always Emits Overlays/Signals   | ✔                       | ✖/Partial                  |
# | Extensive Debug Output          | ✔                       | ✖/Partial                  |
# | Per-Vehicle State Machine       | ✔                       | ✔                          |
# | Modularity                      | ✖                       | ✔                          |
# | Easy to Debug/Modify            | ✔                       | ✖/Partial                  |
#
# ---
#
# ## Conclusion
#
# - Use `video_controller_new.py` for maximum debug visibility, easier modification, and robust fallback logic.
# - Use `video_controller_finale.py` for production-proven modularity and if you want to swap out violation logic easily.
# - Both are robust, but the new version is more transparent and easier to debug in real-world scenarios.
#
# ---
#
# *This file is auto-generated for developer reference. Update as code evolves.*
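
A schematic sketch of the per-vehicle state machine described for `detect_red_light_violations` above (illustrative only; names and thresholds are not taken from either controller):

    import time

    class RedLightStateMachine:
        """Tracks per-vehicle progress: behind line -> crossed during red -> violation."""

        def __init__(self, cooldown_s=5.0):
            self.was_behind_line = {}  # track_id -> bool
            self.last_violation = {}   # track_id -> timestamp
            self.cooldown_s = cooldown_s

        def step(self, track_id, bbox, line_y, light_is_red):
            """Return True if this track commits a violation in this frame."""
            front_y = bbox[3]  # bottom edge approximates the front bumper
            if front_y < line_y:
                # Vehicle has been observed behind the violation line
                self.was_behind_line[track_id] = True
                return False
            # Past the line: a violation only if it was first seen behind the
            # line, the light is red, and the per-vehicle cooldown has expired
            crossed_from_behind = self.was_behind_line.get(track_id, False)
            cooled_down = time.time() - self.last_violation.get(track_id, 0.0) > self.cooldown_s
            if light_is_red and crossed_from_behind and cooled_down:
                self.last_violation[track_id] = time.time()
                self.was_behind_line[track_id] = False
                return True
            return False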
394
qt_app_pyside1/controllers/embedder_import_patch.py
Normal file
@@ -0,0 +1,394 @@
from deep_sort_realtime.embedder.embedder_pytorch import MobileNetV2_Embedder
import os
import sys
import time
import cv2
import numpy as np
from pathlib import Path
from typing import Dict, List, Tuple, Optional

# Add parent directory to path for imports
current_dir = Path(__file__).parent.parent.parent
sys.path.append(str(current_dir))

# Import OpenVINO modules
from detection_openvino import OpenVINOVehicleDetector
from red_light_violation_pipeline import RedLightViolationPipeline

# Import from our utils package
from utils.helpers import bbox_iou


class ModelManager:
    """
    Manages OpenVINO models for traffic detection and violation monitoring.
    Only uses RedLightViolationPipeline for all violation/crosswalk/traffic light logic.
    """
    def __init__(self, config_file: str = None):
        """
        Initialize model manager with configuration.

        Args:
            config_file: Path to JSON configuration file
        """
        self.config = self._load_config(config_file)
        self.detector = None
        self.violation_pipeline = None  # Use RedLightViolationPipeline only
        self.tracker = None
        self._initialize_models()

    def _load_config(self, config_file: Optional[str]) -> Dict:
        """
        Load configuration from file or use defaults.

        Args:
            config_file: Path to JSON configuration file

        Returns:
            Configuration dictionary
        """
        import json
        default_config = {
            "detection": {
                "confidence_threshold": 0.5,
                "enable_ocr": True,
                "enable_tracking": True,
                "model_path": None
            },
            "violations": {
                "red_light_grace_period": 2.0,
                "stop_sign_duration": 2.0,
                "speed_tolerance": 5
            },
            "display": {
                "max_display_width": 800,
                "show_confidence": True,
                "show_labels": True,
                "show_license_plates": True
            },
            "performance": {
                "max_history_frames": 1000,
                "cleanup_interval": 3600
            }
        }

        if config_file and os.path.exists(config_file):
            try:
                with open(config_file, 'r') as f:
                    loaded_config = json.load(f)
                # Merge with defaults (preserving loaded values)
                for section in default_config:
                    if section in loaded_config:
                        default_config[section].update(loaded_config[section])
            except Exception as e:
                print(f"Error loading config: {e}")

        return default_config
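
    # Example of a config file the loader above accepts (illustrative only, not
    # part of the commit). Any section present is merged over the defaults;
    # "device" is read elsewhere via self.config["detection"].get("device", "AUTO").
    #
    # {
    #     "detection": {"confidence_threshold": 0.6, "device": "CPU"},
    #     "display": {"max_display_width": 1280}
    # }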
|
||||
|
||||
def _initialize_models(self):
|
||||
"""Initialize OpenVINO detection and violation models."""
|
||||
try:
|
||||
# Find best model path
|
||||
model_path = self.config["detection"].get("model_path")
|
||||
if not model_path or not os.path.exists(model_path):
|
||||
model_path = self._find_best_model_path()
|
||||
if not model_path:
|
||||
print("❌ No model found")
|
||||
return
|
||||
|
||||
# Initialize detector
|
||||
print(f"✅ Initializing OpenVINO detector with model: {model_path}")
|
||||
device = self.config["detection"].get("device", "AUTO")
|
||||
print(f"✅ Using inference device: {device}")
|
||||
self.detector = OpenVINOVehicleDetector(
|
||||
model_path=model_path,
|
||||
device=device,
|
||||
confidence_threshold=self.config["detection"]["confidence_threshold"]
|
||||
)
|
||||
|
||||
# Use only RedLightViolationPipeline for violation/crosswalk/traffic light logic
|
||||
self.violation_pipeline = RedLightViolationPipeline(debug=True)
|
||||
print("✅ Red light violation pipeline initialized (all other violation logic removed)")
|
||||
|
||||
# Initialize tracker if enabled
|
||||
if self.config["detection"]["enable_tracking"]:
|
||||
try:
|
||||
from deep_sort_realtime.deepsort_tracker import DeepSort
|
||||
|
||||
# Use optimized OpenVINO embedder if available
|
||||
use_optimized_embedder = True
|
||||
embedder = None
|
||||
|
||||
if use_optimized_embedder:
|
||||
try:
|
||||
# Try importing our custom OpenVINO embedder
|
||||
from utils.embedder_openvino import OpenVINOEmbedder
|
||||
print(f"✅ Initializing optimized OpenVINO embedder on {device}")
|
||||
|
||||
# Set model_path explicitly to use the user-supplied model
|
||||
script_dir = Path(__file__).parent.parent
|
||||
model_file_path = None
|
||||
|
||||
# Try the copy version first (might be modified for compatibility)
|
||||
copy_model_path = script_dir / "mobilenetv2 copy.xml"
|
||||
original_model_path = script_dir / "mobilenetv2.xml"
|
||||
|
||||
if copy_model_path.exists():
|
||||
model_file_path = str(copy_model_path)
|
||||
print(f"✅ Using user-supplied model: {model_file_path}")
|
||||
elif original_model_path.exists():
|
||||
model_file_path = str(original_model_path)
|
||||
print(f"✅ Using user-supplied model: {model_file_path}")
|
||||
|
||||
embedder = OpenVINOEmbedder(
|
||||
model_path=model_file_path,
|
||||
device=device,
|
||||
half=True # Use FP16 for better performance
|
||||
)
|
||||
except Exception as emb_err:
|
||||
print(f"⚠️ OpenVINO embedder failed: {emb_err}, falling back to default")
|
||||
|
||||
# Initialize tracker with embedder based on available parameters
|
||||
if embedder is None:
|
||||
print("⚠️ No embedder available, using DeepSORT with default tracking")
|
||||
else:
|
||||
print("✅ Initializing DeepSORT with custom embedder")
|
||||
|
||||
# Simple initialization without problematic parameters
|
||||
self.tracker = DeepSort(
|
||||
max_age=30,
|
||||
n_init=3,
|
||||
nn_budget=100,
|
||||
embedder=embedder
|
||||
)
|
||||
print("✅ DeepSORT tracker initialized")
|
||||
except ImportError:
|
||||
print("⚠️ DeepSORT not available")
|
||||
self.tracker = None
|
||||
print("✅ Models initialized successfully")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error initializing models: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
    def _find_best_model_path(self, base_model_name: str = None) -> Optional[str]:
        """
        Find best available model file in workspace.

        Args:
            base_model_name: Base model name without extension

        Returns:
            Path to model file or None
        """
        # Select model based on device if base_model_name is not specified
        if base_model_name is None:
            device = self.config["detection"].get("device", "AUTO")
            if device == "CPU" or device == "AUTO":
                # Use yolo11n for CPU - faster, lighter model
                base_model_name = "yolo11n"
                print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)")
            else:
                # Use yolo11x for GPU - larger model with better accuracy
                base_model_name = "yolo11x"
                print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)")

        # Check if the openvino_models directory exists in the current working directory
        cwd_openvino_dir = Path.cwd() / "openvino_models"
        if cwd_openvino_dir.exists():
            direct_path = cwd_openvino_dir / f"{base_model_name}.xml"
            if direct_path.exists():
                print(f"✅ Found model directly in CWD: {direct_path}")
                return str(direct_path.absolute())

        # Check for absolute path to openvino_models (this is the most reliable)
        absolute_openvino_dir = Path("D:/Downloads/finale6/khatam/openvino_models")
        if absolute_openvino_dir.exists():
            direct_path = absolute_openvino_dir / f"{base_model_name}.xml"
            if direct_path.exists():
                print(f"✅ Found model at absolute path: {direct_path}")
                return str(direct_path.absolute())

        # Try relative to the model_manager.py file
        openvino_models_dir = Path(__file__).parent.parent.parent / "openvino_models"
        direct_path = openvino_models_dir / f"{base_model_name}.xml"
        if direct_path.exists():
            print(f"✅ Found model in app directory: {direct_path}")
            return str(direct_path.absolute())

        # Check for model in folder structure within openvino_models
        subfolder_path = openvino_models_dir / f"{base_model_name}_openvino_model" / f"{base_model_name}.xml"
        if subfolder_path.exists():
            print(f"✅ Found model in subfolder: {subfolder_path}")
            return str(subfolder_path.absolute())

        # Try other common locations
        search_dirs = [
            ".",
            "..",
            "../models",
            "../rcb",
            "../openvino_models",
            f"../{base_model_name}_openvino_model",
            "../..",  # Go up to project root
            "../../openvino_models",  # Project root / openvino_models
        ]

        model_extensions = [
            (f"{base_model_name}.xml", "OpenVINO IR direct"),
            (f"{base_model_name}_openvino_model/{base_model_name}.xml", "OpenVINO IR"),
            (f"{base_model_name}.pt", "PyTorch"),
        ]

        for search_dir in search_dirs:
            search_path = Path(__file__).parent.parent / search_dir
            if not search_path.exists():
                continue

            for model_file, model_type in model_extensions:
                model_path = search_path / model_file
                if model_path.exists():
                    print(f"✅ Found {model_type} model: {model_path}")
                    return str(model_path.absolute())

        print(f"❌ No model found for {base_model_name}")
        return None

    def detect(self, frame: np.ndarray) -> List[Dict]:
        """
        Detect objects in frame.

        Args:
            frame: Input video frame

        Returns:
            List of detection dictionaries
        """
        if self.detector is None:
            print("WARNING: No detector available")
            return []
        try:
            # Enforce a floor of 0.3 on the configured confidence threshold
            conf_threshold = max(0.3, self.config["detection"].get("confidence_threshold", 0.5))
            detections = self.detector.detect_vehicles(frame, conf_threshold=conf_threshold)

            # Add debug output
            if detections:
                print(f"DEBUG: Detected {len(detections)} objects: " +
                      ", ".join([f"{d['class_name']} ({d['confidence']:.2f})" for d in detections[:3]]))

                # Print bounding box coordinates of first detection
                if len(detections) > 0:
                    print(f"DEBUG: First detection bbox: {detections[0]['bbox']}")
            else:
                print("DEBUG: No detections in this frame")

            return detections
        except Exception as e:
            print(f"❌ Detection error: {e}")
            import traceback
            traceback.print_exc()
            return []

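    # A minimal sketch of the detection dict shape passed around by this class
    # (keys inferred from the usage above; detect_vehicles may attach extras):
    #
    #     detections = [
    #         {"bbox": [100, 120, 220, 260],   # [x1, y1, x2, y2] in pixels
    #          "class_name": "car",
    #          "confidence": 0.87},
    #     ]
    #
    # e.g. keeping only confident vehicles:
    #     cars = [d for d in detections if d["class_name"] == "car" and d["confidence"] > 0.5]
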
    def update_tracking(self, detections: List[Dict], frame: np.ndarray) -> List[Dict]:
        """
        Update tracking information for detections.

        Args:
            detections: List of detections
            frame: Current video frame

        Returns:
            Updated list of detections with tracking info
        """
        if not self.tracker or not detections:
            return detections

        try:
            # Format detections for DeepSORT
            tracker_dets = []
            for det in detections:
                if 'bbox' not in det:
                    continue

                bbox = det['bbox']
                if len(bbox) < 4:
                    continue

                x1, y1, x2, y2 = bbox
                w = x2 - x1
                h = y2 - y1

                if w <= 0 or h <= 0:
                    continue

                conf = det.get('confidence', 0.0)
                class_name = det.get('class_name', 'unknown')
                tracker_dets.append(([x1, y1, w, h], conf, class_name))

            # Update tracks
            if tracker_dets:
                tracks = self.tracker.update_tracks(tracker_dets, frame=frame)

                # Associate tracks with detections
                for track in tracks:
                    if not track.is_confirmed():
                        continue

                    track_id = track.track_id
                    ltrb = track.to_ltrb()

                    for det in detections:
                        if 'bbox' not in det:
                            continue

                        bbox = det['bbox']
                        if len(bbox) < 4:
                            continue

                        dx1, dy1, dx2, dy2 = bbox
                        iou = bbox_iou((dx1, dy1, dx2, dy2), tuple(map(int, ltrb)))

                        if iou > 0.5:
                            det['track_id'] = track_id
                            break
            return detections

        except Exception as e:
            print(f"❌ Tracking error: {e}")
            return detections

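    # deep_sort_realtime's update_tracks() expects one tuple per detection in
    # ([left, top, width, height], confidence, class_name) form, which is why
    # the x1y1x2y2 boxes are converted above. A minimal sketch (values hypothetical):
    #
    #     tracker_dets = [([100, 120, 120, 140], 0.87, "car")]
    #     tracks = self.tracker.update_tracks(tracker_dets, frame=frame)
    #     for t in tracks:
    #         if t.is_confirmed():
    #             print(t.track_id, t.to_ltrb())  # ltrb = [x1, y1, x2, y2]
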
    def update_config(self, new_config: Dict):
        """
        Update configuration parameters.

        Args:
            new_config: New configuration dictionary
        """
        if not new_config:
            return

        # Store old device setting to check if it changed
        old_device = self.config["detection"].get("device", "AUTO") if "detection" in self.config else "AUTO"

        # Update configuration
        for section in new_config:
            if section in self.config:
                self.config[section].update(new_config[section])
            else:
                self.config[section] = new_config[section]

        # Check if device changed - if so, we need to reinitialize models
        new_device = self.config["detection"].get("device", "AUTO")
        device_changed = old_device != new_device

        if device_changed:
            print(f"📢 Device changed from {old_device} to {new_device}, reinitializing models...")
            # Reinitialize models with new device
            self._initialize_models()
            return

        # Just update detector confidence threshold if device didn't change
        if self.detector:
            conf_thres = self.config["detection"].get("confidence_threshold", 0.5)
            self.detector.conf_thres = conf_thres
686
qt_app_pyside1/controllers/enhanced_video_controller.py
Normal file
@@ -0,0 +1,686 @@
"""
Enhanced video controller with async inference and separated FPS tracking
"""

import sys
import os
import time
import cv2
import numpy as np
from collections import deque
from typing import Dict, List, Optional, Tuple, Any
from pathlib import Path
from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer
from PySide6.QtGui import QImage, QPixmap

# Add parent directory to path for imports
current_dir = Path(__file__).parent.parent.parent
sys.path.append(str(current_dir))

# Import our async detector
try:
    # Try direct import first
    from detection_openvino_async import OpenVINOVehicleDetector
except ImportError:
    # Fall back to import from project root
    sys.path.append(str(Path(__file__).parent.parent.parent))
    from detection_openvino_async import OpenVINOVehicleDetector

# Import traffic light color detection utility
try:
    from utils.traffic_light_utils import detect_traffic_light_color, draw_traffic_light_status
    print("✅ Imported traffic light color detection utilities")
except ImportError:
    # Create simple placeholder functions if imports fail
    def detect_traffic_light_color(frame, bbox):
        return {"color": "unknown", "confidence": 0.0}

    def draw_traffic_light_status(frame, bbox, color):
        return frame
    print("⚠️ Failed to import traffic light color detection utilities")

# Import utilities for visualization
try:
    # Try the direct import when running inside the qt_app_pyside directory
    from utils.enhanced_annotation_utils import (
        enhanced_draw_detections,
        draw_performance_overlay,
        enhanced_cv_to_qimage,
        enhanced_cv_to_pixmap
    )
    print("✅ Successfully imported enhanced_annotation_utils from utils package")
except ImportError:
    try:
        # Try fully qualified import path
        from qt_app_pyside.utils.enhanced_annotation_utils import (
            enhanced_draw_detections,
            draw_performance_overlay,
            enhanced_cv_to_qimage,
            enhanced_cv_to_pixmap
        )
        print("✅ Successfully imported enhanced_annotation_utils from qt_app_pyside.utils package")
    except ImportError:
        # Fall back to our minimal implementation
        print("⚠️ Could not import enhanced_annotation_utils, using fallback implementation")
        sys.path.append(str(Path(__file__).parent.parent.parent))
        try:
            from fallback_annotation_utils import (
                enhanced_draw_detections,
                draw_performance_overlay,
                enhanced_cv_to_qimage,
                enhanced_cv_to_pixmap
            )
            print("✅ Using fallback_annotation_utils")
        except ImportError:
            print("❌ CRITICAL: Could not import annotation utilities! UI will be broken.")
            # Define minimal stub functions to prevent crashes
            def enhanced_draw_detections(frame, detections, **kwargs):
                return frame
            def draw_performance_overlay(frame, metrics):
                return frame
            def enhanced_cv_to_qimage(frame):
                return None
            def enhanced_cv_to_pixmap(frame):
                return None

class AsyncVideoProcessingThread(QThread):
    """Thread for async video processing with separate detection and UI threads."""

    # Signal for UI update with enhanced metadata
    frame_processed = Signal(np.ndarray, list, dict)  # frame, detections, metrics

    # Signal for separate processing metrics
    stats_updated = Signal(dict)  # All performance metrics

    def __init__(self, model_manager, parent=None):
        super().__init__(parent)
        self.model_manager = model_manager
        self.running = False
        self.paused = False

        # Video source
        self.source = 0
        self.cap = None
        self.source_fps = 0
        self.target_fps = 30  # Target FPS for UI updates

        # Performance tracking
        self.detection_fps = 0
        self.ui_fps = 0
        self.frame_count = 0
        self.start_time = 0
        self.detection_times = deque(maxlen=30)  # Last 30 detection times
        self.ui_frame_times = deque(maxlen=30)  # Last 30 UI frame times
        self.last_ui_frame_time = 0

        # Mutexes for thread safety
        self.mutex = QMutex()
        self.wait_condition = QWaitCondition()

        # FPS limiter to avoid CPU overload
        self.last_frame_time = 0
        self.min_frame_interval = 1.0 / 60  # Max 60 FPS

        # Async processing queue with frame IDs
        self.frame_queue = []  # List of (frame_id, frame, inf_frame_id) tuples
        self.next_frame_id = 0
        self.processed_frames = {}  # frame_id -> (frame, detections, metrics)
        self.last_emitted_frame_id = -1

        # Separate UI thread timer for smooth display
        self.ui_timer = QTimer()
        self.ui_timer.timeout.connect(self._emit_next_frame)

    def set_source(self, source):
        """Set video source - camera index or file path."""
        print(f"[AsyncThread] set_source: {source} ({type(source)})")
        if source is None:
            self.source = 0
        elif isinstance(source, str) and os.path.isfile(source):
            self.source = source
        elif isinstance(source, int):
            self.source = source
        else:
            print("[AsyncThread] Invalid source, defaulting to camera")
            self.source = 0

    def start_processing(self):
        """Start video processing."""
        self.running = True
        self.start()
        # Start UI timer for smooth frame emission
        self.ui_timer.start(int(1000 / self.target_fps))

    def stop_processing(self):
        """Stop video processing."""
        self.running = False
        self.wait_condition.wakeAll()
        self.wait()
        self.ui_timer.stop()
        if self.cap:
            self.cap.release()
            self.cap = None

    def pause_processing(self):
        """Pause video processing."""
        self.mutex.lock()
        self.paused = True
        self.mutex.unlock()

    def resume_processing(self):
        """Resume video processing."""
        self.mutex.lock()
        self.paused = False
        self.wait_condition.wakeAll()
        self.mutex.unlock()

    def run(self):
        """Main thread execution loop."""
        if not self._initialize_video():
            return
        self.start_time = time.time()
        self.frame_count = 0

        while self.running:
            # Check if paused
            self.mutex.lock()
            if self.paused:
                self.wait_condition.wait(self.mutex)
            self.mutex.unlock()

            if not self.running:
                break

            # Control frame rate
            current_time = time.time()
            time_diff = current_time - self.last_frame_time
            if time_diff < self.min_frame_interval:
                time.sleep(self.min_frame_interval - time_diff)

            # Read frame
            ret, frame = self.cap.read()
            self.last_frame_time = time.time()

            if not ret or frame is None:
                print("End of video or failed to read frame")
                # Check if we're using a file and should restart
                if isinstance(self.source, str) and os.path.isfile(self.source):
                    self._initialize_video()  # Restart video
                    continue
                else:
                    break

            # Process frame asynchronously
            self._process_frame_async(frame)

            # Update frame counter
            self.frame_count += 1

        # Clean up when thread exits
        if self.cap:
            self.cap.release()
            self.cap = None

    def _initialize_video(self):
        """Initialize video source."""
        try:
            if self.cap:
                self.cap.release()

            print(f"[EnhancedVideoController] _initialize_video: self.source = {self.source} (type: {type(self.source)})")
            # Only use camera if source is int or '0', else use file path
            if isinstance(self.source, int):
                self.cap = cv2.VideoCapture(self.source)
            elif isinstance(self.source, str) and os.path.isfile(self.source):
                self.cap = cv2.VideoCapture(self.source)
            else:
                print(f"[EnhancedVideoController] Invalid source: {self.source}, not opening VideoCapture.")
                return False

            if not self.cap.isOpened():
                print(f"Failed to open video source: {self.source}")
                return False

            # Get source FPS
            self.source_fps = self.cap.get(cv2.CAP_PROP_FPS)
            if self.source_fps <= 0:
                self.source_fps = 30  # Default fallback

            print(f"Video source initialized: {self.source}, FPS: {self.source_fps}")
            return True

        except Exception as e:
            print(f"Error initializing video: {e}")
            return False

    def _process_frame_async(self, frame):
        """Process a frame with async detection."""
        try:
            # Start detection timer
            detection_start = time.time()

            # Assign frame ID
            frame_id = self.next_frame_id
            self.next_frame_id += 1

            # Get detector and start async inference
            detector = self.model_manager.detector

            # Check if detector supports async API
            if hasattr(detector, 'detect_async_start'):
                # Use async API
                inf_frame_id = detector.detect_async_start(frame)

                # Store frame in queue with the right ID
                self.mutex.lock()
                self.frame_queue.append((frame_id, frame, inf_frame_id))
                self.mutex.unlock()

                # Try getting results from previous frames
                self._check_async_results()

            else:
                # Fall back to the synchronous API
                detections = self.model_manager.detect(frame)

                # Calculate detection time
                detection_time = time.time() - detection_start
                self.detection_times.append(detection_time)

                # Update detection FPS
                elapsed = time.time() - self.start_time
                if elapsed > 0:
                    self.detection_fps = self.frame_count / elapsed

                # Calculate detection metrics
                detection_ms = detection_time * 1000
                avg_detection_ms = np.mean(self.detection_times) * 1000

                # Store metrics
                metrics = {
                    'detection_fps': self.detection_fps,
                    'detection_ms': detection_ms,
                    'avg_detection_ms': avg_detection_ms,
                    'frame_id': frame_id
                }

                # Store processed frame
                self.mutex.lock()
                self.processed_frames[frame_id] = (frame, detections, metrics)
                self.mutex.unlock()

                # Emit stats update
                self.stats_updated.emit(metrics)

        except Exception as e:
            print(f"Error in frame processing: {e}")
            import traceback
            traceback.print_exc()

    def _check_async_results(self):
        """Check for completed async inference requests."""
        try:
            detector = self.model_manager.detector
            if not hasattr(detector, 'detect_async_get_result'):
                return

            # Get any frames waiting for results
            self.mutex.lock()
            queue_copy = self.frame_queue.copy()
            self.mutex.unlock()

            completed_ids = []

            # Check each frame in the queue
            for idx, (frame_id, frame, inf_frame_id) in enumerate(queue_copy):
                # Try to get results without waiting
                detections = detector.detect_async_get_result(inf_frame_id, wait=False)

                # If results are ready
                if detections is not None:
                    # Calculate metrics
                    detection_time = time.time() - detector.active_requests[inf_frame_id][2] if inf_frame_id in detector.active_requests else 0
                    self.detection_times.append(detection_time)

                    # Update detection FPS
                    elapsed = time.time() - self.start_time
                    if elapsed > 0:
                        self.detection_fps = self.frame_count / elapsed

                    # Calculate metrics
                    detection_ms = detection_time * 1000
                    avg_detection_ms = np.mean(self.detection_times) * 1000

                    # Store metrics
                    metrics = {
                        'detection_fps': self.detection_fps,
                        'detection_ms': detection_ms,
                        'avg_detection_ms': avg_detection_ms,
                        'frame_id': frame_id
                    }

                    # Store processed frame
                    self.mutex.lock()
                    self.processed_frames[frame_id] = (frame, detections, metrics)
                    completed_ids.append(frame_id)
                    self.mutex.unlock()

                    # Emit stats update
                    self.stats_updated.emit(metrics)

            # Remove completed frames from the pending queue
            if completed_ids:
                self.mutex.lock()
                self.frame_queue = [item for item in self.frame_queue
                                    if item[0] not in completed_ids]
                self.mutex.unlock()

        except Exception as e:
            print(f"Error checking async results: {e}")
            import traceback
            traceback.print_exc()

    def _emit_next_frame(self):
        """Emit the next processed frame to the UI at a controlled rate."""
        try:
            # Update UI FPS calculation
            current_time = time.time()
            if self.last_ui_frame_time > 0:
                ui_frame_time = current_time - self.last_ui_frame_time
                self.ui_frame_times.append(ui_frame_time)
                self.ui_fps = 1.0 / ui_frame_time if ui_frame_time > 0 else 0
            self.last_ui_frame_time = current_time

            # Check async results first
            self._check_async_results()

            # Find the next frame to emit
            self.mutex.lock()
            available_frames = sorted(self.processed_frames.keys())
            self.mutex.unlock()

            if not available_frames:
                return

            next_frame_id = available_frames[0]

            # Get the frame data
            self.mutex.lock()
            frame, detections, metrics = self.processed_frames.pop(next_frame_id)
            self.mutex.unlock()

            # Add UI FPS to metrics
            metrics['ui_fps'] = self.ui_fps

            # Apply tracking if available
            if self.model_manager.tracker:
                detections = self.model_manager.update_tracking(detections, frame)

            # Emit the frame to the UI
            self.frame_processed.emit(frame, detections, metrics)

            # Store as last emitted frame
            self.last_emitted_frame_id = next_frame_id

        except Exception as e:
            print(f"Error emitting frame: {e}")
            import traceback
            traceback.print_exc()

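# The thread above deliberately decouples capture/inference (run /
# _process_frame_async) from display (_emit_next_frame on a QTimer), which is
# why detection FPS and UI FPS are tracked separately. A minimal driving
# sketch, assuming model_manager is an initialized ModelManager and a Qt event
# loop is running:
#
#     thread = AsyncVideoProcessingThread(model_manager)
#     thread.set_source("traffic.mp4")
#     thread.frame_processed.connect(lambda f, dets, m: print(len(dets), m.get('ui_fps')))
#     thread.start_processing()
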
class EnhancedVideoController(QObject):
    """
    Enhanced video controller with better file handling and statistics.
    """
    # Define signals
    frame_ready = Signal(QPixmap)  # Frame as QPixmap for direct display
    frame_np_ready = Signal(np.ndarray)  # Frame as NumPy array
    raw_frame_ready = Signal(dict)  # Raw frame data with detections
    stats_ready = Signal(dict)  # All performance stats (dictionary with fps and detection_time)

    def __init__(self, model_manager=None):
        """Initialize the video controller"""
        super().__init__()

        # Input source
        self._source = 0  # Default to camera 0
        self._source_type = "camera"
        self._running = False
        # Instance variable tracking the most recent traffic light color
        self._last_traffic_light_color = "unknown"

        # Regular controller instance variables
        self.model_manager = model_manager
        self.processing_thread = None
        self.show_annotations = True
        self.show_fps = True
        self.save_video = False
        self.video_writer = None

    def set_source(self, source):
        """Set video source - camera index or file path."""
        print(f"[EnhancedVideoController] set_source: {source} ({type(source)})")
        self._source = source  # Remember the source so start() can apply it
        if self.processing_thread:
            self.processing_thread.set_source(source)

    def start(self):
        """Start video processing."""
        if self.processing_thread and self.processing_thread.running:
            return

        # Create new processing thread and hand it the current source
        self.processing_thread = AsyncVideoProcessingThread(self.model_manager)
        self.processing_thread.set_source(self._source)

        # Connect signals
        self.processing_thread.frame_processed.connect(self._on_frame_processed)
        self.processing_thread.stats_updated.connect(self._on_stats_updated)

        # Start processing
        self.processing_thread.start_processing()

    def stop(self):
        """Stop video processing."""
        if self.processing_thread:
            self.processing_thread.stop_processing()
            self.processing_thread = None

        if self.video_writer:
            self.video_writer.release()
            self.video_writer = None

    def pause(self):
        """Pause video processing."""
        if self.processing_thread:
            self.processing_thread.pause_processing()

    def resume(self):
        """Resume video processing."""
        if self.processing_thread:
            self.processing_thread.resume_processing()

    def toggle_annotations(self, enabled):
        """Toggle annotations on/off."""
        self.show_annotations = enabled

    def toggle_fps_display(self, enabled):
        """Toggle FPS display on/off."""
        self.show_fps = enabled

    def start_recording(self, output_path, frame_size=(640, 480), fps=30):
        """Start recording video to file."""
        self.save_video = True
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.video_writer = cv2.VideoWriter(
            output_path, fourcc, fps,
            (frame_size[0], frame_size[1])
        )

    def stop_recording(self):
        """Stop recording video."""
        self.save_video = False
        if self.video_writer:
            self.video_writer.release()
            self.video_writer = None

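    # A minimal wiring sketch for a consumer tab (widget names are
    # hypothetical; the signals are the ones defined on this class):
    #
    #     controller = EnhancedVideoController(model_manager)
    #     controller.frame_ready.connect(video_label.setPixmap)
    #     controller.stats_ready.connect(lambda s: fps_label.setText(f"{s['fps']:.1f} FPS"))
    #     controller.set_source("traffic.mp4")
    #     controller.start()
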
    def _on_frame_processed(self, frame, detections, metrics):
        """Handle processed frame from the worker thread."""
        try:
            # Create a copy of the frame for annotation
            display_frame = frame.copy()

            # Apply annotations if enabled
            if self.show_annotations and detections:
                display_frame = enhanced_draw_detections(display_frame, detections)

                # Detect and annotate traffic light colors
                for detection in detections:
                    # Check for both class_id 9 (COCO) and any other traffic light classes
                    if detection.get('class_id') == 9 or detection.get('class_name') == 'traffic light':
                        bbox = detection.get('bbox')
                        if not bbox:
                            continue

                        # Get traffic light color
                        color = detect_traffic_light_color(frame, bbox)
                        # Store the latest traffic light color
                        self._last_traffic_light_color = color
                        # Draw traffic light status
                        display_frame = draw_traffic_light_status(display_frame, bbox, color)
                        print(f"🚦 Traffic light detected with color: {color}")

            # Add FPS counter if enabled
            if self.show_fps:
                # Add both detection and UI FPS
                detection_fps = metrics.get('detection_fps', 0)
                ui_fps = metrics.get('ui_fps', 0)
                detection_ms = metrics.get('avg_detection_ms', 0)

                display_frame = draw_performance_overlay(
                    display_frame,
                    {
                        "Detection FPS": f"{detection_fps:.1f}",
                        "UI FPS": f"{ui_fps:.1f}",
                        "Inference": f"{detection_ms:.1f} ms"
                    }
                )

            # Save frame if recording
            if self.save_video and self.video_writer:
                self.video_writer.write(display_frame)

            # Convert to QPixmap for display
            pixmap = enhanced_cv_to_pixmap(display_frame)

            # Emit signals; payloads must match the Signal definitions above
            self.frame_ready.emit(pixmap)
            self.raw_frame_ready.emit({'frame': frame, 'detections': detections, 'metrics': metrics})
            # Emit numpy frame for compatibility with existing connections
            self.frame_np_ready.emit(frame)

        except Exception as e:
            print(f"Error processing frame: {e}")
            import traceback
            traceback.print_exc()

    def _on_stats_updated(self, stats):
        """Handle updated statistics from the worker thread."""
        try:
            # Create a proper stats dictionary for the LiveTab
            ui_stats = {
                'fps': stats.get('detection_fps', 0.0),
                'detection_time': stats.get('avg_detection_ms', 0.0),
                'traffic_light_color': self._last_traffic_light_color
            }
            print(f"Emitting stats: {ui_stats}")
            # Emit as a dictionary - fixed signal/slot mismatch
            self.stats_ready.emit(ui_stats)
        except Exception as e:
            print(f"Error in stats update: {e}")
            import traceback
            traceback.print_exc()

    def _process_frame_for_display(self, frame, detections, metrics=None):
        """Process a frame for display, adding annotations."""
        try:
            # Create a copy for display
            display_frame = frame.copy()
            # Process traffic light detections to identify colors
            for det in detections:
                if det.get('class_name') == 'traffic light':
                    # Get traffic light color
                    bbox = det['bbox']
                    light_color = detect_traffic_light_color(frame, bbox)

                    # Add color information to detection
                    det['traffic_light_color'] = light_color

                    # Store the latest traffic light color
                    self._last_traffic_light_color = light_color

                    # Use specialized drawing for traffic lights
                    display_frame = draw_traffic_light_status(display_frame, bbox, light_color)

                    print(f"🚦 Traffic light detected with color: {light_color}")
                else:
                    # Draw regular detection box
                    bbox = det['bbox']
                    x1, y1, x2, y2 = [int(c) for c in bbox]
                    class_name = det.get('class_name', 'object')
                    confidence = det.get('confidence', 0.0)

                    label = f"{class_name} {confidence:.2f}"
                    color = (0, 255, 0)  # Green for other objects

                    cv2.rectangle(display_frame, (x1, y1), (x2, y2), color, 2)
                    cv2.putText(display_frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)

            # Add tracker visualization if tracking is enabled
            # (guarded with getattr: this controller has no tracker attribute of its own)
            if getattr(self, 'tracker', None) and hasattr(self, 'visualization_tracks'):
                # Draw current tracks
                for track_id, track_info in self.visualization_tracks.items():
                    track_box = track_info.get('box')
                    if track_box:
                        x1, y1, x2, y2 = [int(c) for c in track_box]
                        track_class = track_info.get('class_name', 'tracked')

                        # Draw track ID and class
                        cv2.rectangle(display_frame, (x1, y1), (x2, y2), (255, 0, 255), 2)
                        cv2.putText(display_frame, f"{track_class} #{track_id}",
                                    (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)

                        # Draw trail if available
                        trail = track_info.get('trail', [])
                        if len(trail) > 1:
                            for i in range(1, len(trail)):
                                cv2.line(display_frame,
                                         (int(trail[i-1][0]), int(trail[i-1][1])),
                                         (int(trail[i][0]), int(trail[i][1])),
                                         (255, 0, 255), 2)

            # Add FPS counter if enabled (metrics may be None)
            if self.show_fps and metrics:
                # Add both detection and UI FPS
                detection_fps = metrics.get('detection_fps', 0)
                ui_fps = metrics.get('ui_fps', 0)
                detection_ms = metrics.get('avg_detection_ms', 0)

                display_frame = draw_performance_overlay(
                    display_frame,
                    {
                        "Detection FPS": f"{detection_fps:.1f}",
                        "UI FPS": f"{ui_fps:.1f}",
                        "Inference": f"{detection_ms:.1f} ms"
                    }
                )

            # Save frame if recording
            if self.save_video and self.video_writer:
                self.video_writer.write(display_frame)

            # Convert to QPixmap for display
            pixmap = enhanced_cv_to_pixmap(display_frame)

            # Emit signals; payloads must match the Signal definitions above
            self.frame_ready.emit(pixmap)
            self.raw_frame_ready.emit({'frame': frame, 'detections': detections, 'metrics': metrics})
            # Emit numpy frame for compatibility with existing connections
            self.frame_np_ready.emit(frame)

        except Exception as e:
            print(f"Error processing frame: {e}")
            import traceback
            traceback.print_exc()
474
qt_app_pyside1/controllers/model_manager.py
Normal file
@@ -0,0 +1,474 @@
import os
import sys
import time
import cv2
import numpy as np
from pathlib import Path
from typing import Dict, List, Tuple, Optional

# Add parent directory to path for imports
current_dir = Path(__file__).parent.parent.parent
sys.path.append(str(current_dir))

# Import OpenVINO modules
from detection_openvino import OpenVINOVehicleDetector
from red_light_violation_pipeline import RedLightViolationPipeline

# Import from our utils package
from utils.helpers import bbox_iou

class ModelManager:
    """
    Manages OpenVINO models for traffic detection and violation monitoring.
    Only uses RedLightViolationPipeline for all violation/crosswalk/traffic light logic.
    """
    def __init__(self, config_file: str = None, tracker=None):
        """
        Initialize model manager with configuration.

        Args:
            config_file: Path to JSON configuration file
            tracker: (Optional) External tracker instance (e.g., DeepSortVehicleTracker singleton)
        """
        self.config = self._load_config(config_file)
        self.detector = None
        self.violation_pipeline = None  # Use RedLightViolationPipeline only
        self.tracker = tracker
        self._initialize_models()

    def _load_config(self, config_file: Optional[str]) -> Dict:
        """
        Load configuration from file or use defaults.

        Args:
            config_file: Path to JSON configuration file

        Returns:
            Configuration dictionary
        """
        import json
        default_config = {
            "detection": {
                "confidence_threshold": 0.3,
                "enable_ocr": True,
                "enable_tracking": True,
                "model_path": None
            },
            "violations": {
                "red_light_grace_period": 2.0,
                "stop_sign_duration": 2.0,
                "speed_tolerance": 5
            },
            "display": {
                "max_display_width": 800,
                "show_confidence": True,
                "show_labels": True,
                "show_license_plates": True
            },
            "performance": {
                "max_history_frames": 1000,
                "cleanup_interval": 3600
            }
        }

        if config_file and os.path.exists(config_file):
            try:
                with open(config_file, 'r') as f:
                    loaded_config = json.load(f)
                # Merge with defaults (preserving loaded values)
                for section in default_config:
                    if section in loaded_config:
                        default_config[section].update(loaded_config[section])
            except Exception as e:
                print(f"Error loading config: {e}")

        return default_config

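    # The merge above is shallow (one dict.update per top-level section), so a
    # config file only needs the keys it overrides. A small sketch (file name
    # hypothetical):
    #
    #     # config.json: {"detection": {"device": "GPU", "confidence_threshold": 0.4}}
    #     mm = ModelManager(config_file="config.json")
    #     assert mm.config["display"]["max_display_width"] == 800  # untouched default
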
    def _initialize_models(self):
        """Initialize OpenVINO detection and violation models."""
        try:
            # Find best model path
            model_path = self.config["detection"].get("model_path")
            if not model_path or not os.path.exists(model_path):
                model_path = self._find_best_model_path()
                if not model_path:
                    print("❌ No model found")
                    return

            # Initialize detector
            print(f"✅ Initializing OpenVINO detector with model: {model_path}")
            device = self.config["detection"].get("device", "AUTO")
            print(f"✅ Using inference device: {device}")
            self.detector = OpenVINOVehicleDetector(
                model_path=model_path,
                device=device,
                confidence_threshold=self.config["detection"]["confidence_threshold"]
            )

            # Use only RedLightViolationPipeline for violation/crosswalk/traffic light logic
            self.violation_pipeline = RedLightViolationPipeline(debug=True)
            print("✅ Red light violation pipeline initialized (all other violation logic removed)")

            # Only initialize tracker if not provided
            if self.tracker is None and self.config["detection"]["enable_tracking"]:
                try:
                    from controllers.bytetrack_tracker import ByteTrackVehicleTracker
                    self.tracker = ByteTrackVehicleTracker()
                    print("✅ ByteTrack tracker initialized (internal)")
                except ImportError:
                    print("⚠️ ByteTrack not available")
                    self.tracker = None
            elif self.tracker is not None:
                print("✅ Using external DeepSORT tracker instance")
            print("✅ Models initialized successfully")

        except Exception as e:
            print(f"❌ Error initializing models: {e}")
            import traceback
            traceback.print_exc()

    def _find_best_model_path(self, base_model_name: str = None) -> Optional[str]:
        """
        Find best available model file in workspace.

        Args:
            base_model_name: Base model name without extension

        Returns:
            Path to model file or None
        """
        # Select model based on device if base_model_name is not specified
        if base_model_name is None:
            device = self.config["detection"].get("device", "AUTO")
            if device == "CPU" or device == "AUTO":
                # Use yolo11n for CPU - faster, lighter model
                base_model_name = "yolo11n"
                print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)")
            else:
                # Use yolo11x for GPU - larger model with better accuracy
                base_model_name = "yolo11x"
                print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)")

        # Check if the openvino_models directory exists in the current working directory
        cwd_openvino_dir = Path.cwd() / "openvino_models"
        if cwd_openvino_dir.exists():
            direct_path = cwd_openvino_dir / f"{base_model_name}.xml"
            if direct_path.exists():
                print(f"✅ Found model directly in CWD: {direct_path}")
                return str(direct_path.absolute())

        # Check for absolute path to openvino_models (this is the most reliable)
        absolute_openvino_dir = Path("D:/Downloads/finale6/khatam/openvino_models")
        if absolute_openvino_dir.exists():
            direct_path = absolute_openvino_dir / f"{base_model_name}.xml"
            if direct_path.exists():
                print(f"✅ Found model at absolute path: {direct_path}")
                return str(direct_path.absolute())

        # Try relative to the model_manager.py file
        openvino_models_dir = Path(__file__).parent.parent.parent / "openvino_models"
        direct_path = openvino_models_dir / f"{base_model_name}.xml"
        if direct_path.exists():
            print(f"✅ Found model in app directory: {direct_path}")
            return str(direct_path.absolute())

        # Check for model in folder structure within openvino_models
        subfolder_path = openvino_models_dir / f"{base_model_name}_openvino_model" / f"{base_model_name}.xml"
        if subfolder_path.exists():
            print(f"✅ Found model in subfolder: {subfolder_path}")
            return str(subfolder_path.absolute())

        # Try other common locations
        search_dirs = [
            ".",
            "..",
            "../models",
            "../rcb",
            "../openvino_models",
            f"../{base_model_name}_openvino_model",
            "../..",  # Go up to project root
            "../../openvino_models",  # Project root / openvino_models
        ]

        model_extensions = [
            (f"{base_model_name}.xml", "OpenVINO IR direct"),
            (f"{base_model_name}_openvino_model/{base_model_name}.xml", "OpenVINO IR"),
            (f"{base_model_name}.pt", "PyTorch"),
        ]

        for search_dir in search_dirs:
            search_path = Path(__file__).parent.parent / search_dir
            if not search_path.exists():
                continue

            for model_file, model_type in model_extensions:
                model_path = search_path / model_file
                if model_path.exists():
                    print(f"✅ Found {model_type} model: {model_path}")
                    return str(model_path.absolute())

        print(f"❌ No model found for {base_model_name}")
        return None

    def detect(self, frame: np.ndarray) -> List[Dict]:
        """
        Detect objects in frame.

        Args:
            frame: Input video frame

        Returns:
            List of detection dictionaries
        """
        if self.detector is None:
            print("WARNING: No detector available")
            return []
        try:
            # Enforce a floor of 0.15 on the configured confidence threshold
            # (kept low so traffic lights are not filtered out)
            base_conf_threshold = self.config["detection"].get("confidence_threshold", 0.5)
            conf_threshold = max(0.15, base_conf_threshold)
            detections = self.detector.detect_vehicles(frame, conf_threshold=conf_threshold)

            # Try to find traffic lights with even lower confidence if none found
            traffic_light_found = any(det.get('class_name') == 'traffic light' for det in detections)
            if not traffic_light_found:
                print("⚠️ No traffic lights detected with normal confidence, trying lower threshold...")
                try:
                    low_conf_detections = self.detector.detect_vehicles(frame, conf_threshold=0.05)
                    for det in low_conf_detections:
                        if det.get('class_name') == 'traffic light' and det not in detections:
                            print(f"🚦 Adding low confidence traffic light: conf={det['confidence']:.3f}")
                            detections.append(det)
                except Exception as e:
                    print(f"❌ Error trying low confidence detection: {e}")

            # Enhance traffic light detection using the same utilities as qt_app_pyside
            from utils.traffic_light_utils import detect_traffic_light_color, ensure_traffic_light_color
            for det in detections:
                if det.get('class_id') == 9 or det.get('class_name') == 'traffic light':
                    try:
                        bbox = det['bbox']
                        light_info = detect_traffic_light_color(frame, bbox)
                        if light_info.get("color", "unknown") == "unknown":
                            light_info = ensure_traffic_light_color(frame, bbox)
                        det['traffic_light_color'] = light_info
                        print(f"🚦 Enhanced Traffic Light Detection: {light_info}")
                    except Exception as e:
                        print(f"❌ Error in enhanced traffic light detection: {e}")

            # Ensure all detections have a valid class_name and confidence
            for det in detections:
                if det.get('class_name') is None:
                    det['class_name'] = 'object'
                if det.get('confidence') is None:
                    det['confidence'] = 0.0

            # Add debug output
            if detections:
                print(f"DEBUG: Detected {len(detections)} objects: " + ", ".join([f"{d['class_name']} ({d['confidence']:.2f})" for d in detections[:3]]))
                # Print bounding box coordinates of first detection
                if len(detections) > 0:
                    print(f"DEBUG: First detection bbox: {detections[0]['bbox']}")
            else:
                print("DEBUG: No detections in this frame")
            return detections
        except Exception as e:
            print(f"❌ Detection error: {e}")
            import traceback
            traceback.print_exc()
            return []

    def update_tracking(self, detections: List[Dict], frame: np.ndarray) -> List[Dict]:
        """
        Update tracking information for detections.

        Args:
            detections: List of detections
            frame: Current video frame

        Returns:
            Updated list of detections with tracking info
        """
        if not self.tracker or not detections:
            # Fallback: assign temporary IDs if no tracker
            for idx, det in enumerate(detections):
                det['id'] = idx
                if det.get('class_name') is None:
                    det['class_name'] = 'object'
                if det.get('confidence') is None:
                    det['confidence'] = 0.0
            return detections
        try:
            tracker_dets = []
            det_map = []  # Keep mapping to original detection
            for det in detections:
                bbox = det['bbox']
                if len(bbox) < 4:
                    continue
                x1, y1, x2, y2 = bbox
                w = x2 - x1
                h = y2 - y1
                if w <= 0 or h <= 0:
                    continue
                conf = det.get('confidence', 0.0)
                class_name = det.get('class_name', 'object')
                tracker_dets.append(([x1, y1, w, h], conf, class_name))
                det_map.append(det)
            # Update tracks
            output = []
            if tracker_dets:
                tracks = self.tracker.update_tracks(tracker_dets, frame=frame)
                for i, track in enumerate(tracks):
                    # FIXED: Handle both object-style tracks (with methods) and dict-style tracks
                    # First check if track is confirmed (handle both dict and object styles)
                    is_confirmed = True  # Default to True for dict-style tracks
                    if hasattr(track, 'is_confirmed') and callable(getattr(track, 'is_confirmed')):
                        is_confirmed = track.is_confirmed()

                    if not is_confirmed:
                        continue

                    # Get track_id (handle both dict and object styles)
                    if hasattr(track, 'track_id'):
                        track_id = track.track_id
                    elif isinstance(track, dict) and 'id' in track:
                        track_id = track['id']
                    else:
                        print(f"Warning: Track has no ID, skipping: {track}")
                        continue

                    # Get bounding box (handle both dict and object styles)
                    if hasattr(track, 'to_ltrb') and callable(getattr(track, 'to_ltrb')):
                        ltrb = track.to_ltrb()
                    elif isinstance(track, dict) and 'bbox' in track:
                        ltrb = track['bbox']  # Assume bbox is already in [x1,y1,x2,y2] format
                    else:
                        print(f"Warning: Track has no bbox, skipping: {track}")
                        continue

                    # Try to match track to detection by index (DeepSORT returns tracks
                    # in the same order as input detections); otherwise fall back to IoU matching
                    matched_class = 'object'
                    matched_conf = 0.0
                    if hasattr(track, 'det_index') and track.det_index is not None and track.det_index < len(det_map):
                        matched_class = det_map[track.det_index].get('class_name', 'object')
                        matched_conf = det_map[track.det_index].get('confidence', 0.0)
                    else:
                        # Try to match by IoU if possible
                        best_iou = 0
                        for det in det_map:
                            db = det['bbox']
                            iou = self._bbox_iou([int(ltrb[0]), int(ltrb[1]), int(ltrb[2]), int(ltrb[3])], db)
                            if iou > best_iou:
                                best_iou = iou
                                matched_class = det.get('class_name', 'object')
                                matched_conf = det.get('confidence', 0.0)
                    if matched_class is None:
                        matched_class = 'object'
                    if matched_conf is None:
                        matched_conf = 0.0
                    output.append({
                        'bbox': [int(ltrb[0]), int(ltrb[1]), int(ltrb[2]), int(ltrb[3])],
                        'class_name': matched_class,
                        'confidence': matched_conf,
                        'id': track_id
                    })
            # Fallback: assign temp IDs if no tracks
            if not output:
                for idx, det in enumerate(detections):
                    det['id'] = idx
                    if det.get('class_name') is None:
                        det['class_name'] = 'object'
                    if det.get('confidence') is None:
                        det['confidence'] = 0.0
                return detections
            return output
        except Exception as e:
            print(f"❌ Tracking error: {e}")
            # Fallback: assign temp IDs
            for idx, det in enumerate(detections):
                det['id'] = idx
                if det.get('class_name') is None:
                    det['class_name'] = 'object'
                if det.get('confidence') is None:
                    det['confidence'] = 0.0
            return detections

    def update_config(self, new_config: Dict):
        """
        Update configuration parameters.

        Args:
            new_config: New configuration dictionary
        """
        if not new_config:
            return

        # Store old device setting to check if it changed
        old_device = self.config["detection"].get("device", "AUTO") if "detection" in self.config else "AUTO"

        # Update configuration
        for section in new_config:
            if section in self.config:
                self.config[section].update(new_config[section])
            else:
                self.config[section] = new_config[section]

        # Check if device changed - if so, we need to reinitialize models
        new_device = self.config["detection"].get("device", "AUTO")
        device_changed = old_device != new_device

        if device_changed:
            print(f"📢 Device changed from {old_device} to {new_device}, reinitializing models...")
            # Reinitialize models with new device
            self._initialize_models()
            return

        # Just update detector confidence threshold if device didn't change
        if self.detector:
            conf_thres = self.config["detection"].get("confidence_threshold", 0.5)
            self.detector.conf_thres = conf_thres

    def _bbox_iou(self, boxA, boxB):
        # Compute the intersection over union of two boxes
        xA = max(boxA[0], boxB[0])
        yA = max(boxA[1], boxB[1])
        xB = min(boxA[2], boxB[2])
        yB = min(boxA[3], boxB[3])
        interArea = max(0, xB - xA) * max(0, yB - yA)
        boxAArea = max(0, boxA[2] - boxA[0]) * max(0, boxA[3] - boxA[1])
        boxBArea = max(0, boxB[2] - boxB[0]) * max(0, boxB[3] - boxB[1])
        if boxAArea + boxBArea - interArea == 0:
            return 0.0
        iou = interArea / float(boxAArea + boxBArea - interArea)
        return iou

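    # Worked example for the IoU above: [0, 0, 10, 10] and [5, 5, 15, 15]
    # overlap in a 5x5 patch, so IoU = 25 / (100 + 100 - 25) ≈ 0.143:
    #
    #     self._bbox_iou([0, 0, 10, 10], [5, 5, 15, 15])  # -> 0.14285...
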
    def switch_model(self, target_device: str = None) -> bool:
        """
        Manually switch to a different model based on target device.
        Args:
            target_device: Target device ("CPU", "GPU", "AUTO", etc.)
        Returns:
            True if switch was successful, False otherwise
        """
        # Remember the current device up front so every failure path can restore it
        old_device = self.config["detection"].get("device", "AUTO")
        if target_device:
            self.config["detection"]["device"] = target_device
            print(f"🔄 Manual model switch requested: {old_device} → {target_device}")
        # If the detector has a switch_model method, use it
        if hasattr(self.detector, 'switch_model'):
            try:
                success = self.detector.switch_model(device=target_device)
                if success:
                    print(f"✅ Successfully switched to {target_device} optimized model")
                    # If tracker needs update, reinitialize if device changed
                    if old_device != target_device:
                        self._initialize_models()  # Optionally update tracker
                    return True
                else:
                    print(f"❌ Failed to switch detector to {target_device}")
                    self.config["detection"]["device"] = old_device
                    return False
            except Exception as e:
                print(f"❌ Failed to switch model: {e}")
                self.config["detection"]["device"] = old_device
                return False
        else:
            # Fallback: reinitialize models
            try:
                self._initialize_models()
                print(f"✅ Successfully switched to {target_device} optimized model (fallback)")
                return True
            except Exception as e:
                print(f"❌ Failed to switch model: {e}")
                self.config["detection"]["device"] = old_device
                return False
471
qt_app_pyside1/controllers/new.py
Normal file
@@ -0,0 +1,471 @@
"""
Final Video Controller for Automatic Traffic Red-Light Violation Detection
- Uses detection_openvino.py for OpenVINO YOLOv11n detection
- Crosswalk (zebra crossing) detection using RANSAC/white-line logic
- Vehicle tracking using OpenCV trackers
- Violation logic: detects vehicles crossing the violation line on red
- Visualization and video output
"""
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))

import cv2
import numpy as np
from sklearn import linear_model


# --- Crosswalk (Zebra Crossing) Detection ---
def detect_crosswalk(frame):
    """Detect crosswalk (zebra crossing) in the frame. Returns dict with detection status and y position."""
    # White color mask
    lower = np.array([170, 170, 170])
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(frame, lower, upper)
    # Erode to remove noise (pass the anchor as a keyword; positionally it
    # would be interpreted as the dst argument)
    erode_size = max(1, frame.shape[0] // 30)
    erode_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (erode_size, 1))
    eroded = cv2.erode(mask, erode_structure, anchor=(-1, -1))
    # Find contours
    contours, _ = cv2.findContours(eroded, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    left_points, right_points = [], []
    bw_width = 170
    crosswalk_y = None
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if w > bw_width:
            left_points.append([x, y])
            right_points.append([x + w, y])
    # RANSAC fit
    crosswalk_detected = False
    if len(left_points) > 5 and len(right_points) > 5:
        left_points = np.array(left_points)
        right_points = np.array(right_points)
        model_l = linear_model.RANSACRegressor().fit(left_points[:, 0:1], left_points[:, 1])
        model_r = linear_model.RANSACRegressor().fit(right_points[:, 0:1], right_points[:, 1])
        # If the lines are roughly parallel and horizontal, assume crosswalk
        slope_l = model_l.estimator_.coef_[0]
        slope_r = model_r.estimator_.coef_[0]
        if abs(slope_l) < 0.3 and abs(slope_r) < 0.3:
            crosswalk_detected = True
            crosswalk_y = int(np.median(left_points[:, 1]))
    return {'crosswalk_detected': crosswalk_detected, 'crosswalk_y': crosswalk_y}

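# Minimal usage sketch for detect_crosswalk (the image source is hypothetical);
# it returns a plain dict, so callers can anchor the violation line on it:
#
#     frame = cv2.imread("intersection.jpg")
#     cw = detect_crosswalk(frame)
#     if cw['crosswalk_detected']:
#         violation_line_y = cw['crosswalk_y']  # stop line sits at the crosswalk's y
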
def get_traffic_light_color(frame, bbox):
    """Detect traffic light color in the given bounding box (x_min, y_min, x_max, y_max). Returns 'red', 'yellow', 'green', or 'unknown'."""
    x_min, y_min, x_max, y_max = bbox
    roi = frame[max(0, y_min):y_max, max(0, x_min):x_max]
    if roi.size == 0:
        return 'unknown'
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask_red1 = cv2.inRange(hsv, (0, 70, 50), (10, 255, 255))
    mask_red2 = cv2.inRange(hsv, (170, 70, 50), (180, 255, 255))
    mask_red = cv2.bitwise_or(mask_red1, mask_red2)
    mask_yellow = cv2.inRange(hsv, (15, 70, 50), (35, 255, 255))
    mask_green = cv2.inRange(hsv, (40, 70, 50), (90, 255, 255))
    red = np.sum(mask_red)
    yellow = np.sum(mask_yellow)
    green = np.sum(mask_green)
    if max(red, yellow, green) == 0:
        return 'unknown'
    if red >= yellow and red >= green:
        return 'red'
    elif yellow >= green:
        return 'yellow'
    else:
        return 'green'

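# The classifier above compares HSV mask areas; red needs two hue ranges
# because red wraps around the hue axis (0-10 and 170-180 on OpenCV's 0-179
# scale). A minimal sketch (bbox values hypothetical):
#
#     color = get_traffic_light_color(frame, (100, 40, 130, 110))
#     if color == 'red':
#         pass  # gate the red-light violation check here
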
## Model manager (working copy)
import os
import sys
import time
import cv2
import numpy as np
from pathlib import Path
from typing import Dict, List, Tuple, Optional

# Add parent directory to path for imports
current_dir = Path(__file__).parent.parent.parent
sys.path.append(str(current_dir))

# Import OpenVINO modules
from detection_openvino import OpenVINOVehicleDetector
from red_light_violation_pipeline import RedLightViolationPipeline

# Import from our utils package
from utils.helpers import bbox_iou

class ModelManager:
    """
    Manages OpenVINO models for traffic detection and violation monitoring.
    Only uses RedLightViolationPipeline for all violation/crosswalk/traffic light logic.
    """
    def __init__(self, config_file: str = None):
        """
        Initialize model manager with configuration.

        Args:
            config_file: Path to JSON configuration file
        """
        self.config = self._load_config(config_file)
        self.detector = None
        self.violation_pipeline = None  # Use RedLightViolationPipeline only
        self.tracker = None
        self._initialize_models()

    def _load_config(self, config_file: Optional[str]) -> Dict:
        """
        Load configuration from file or use defaults.

        Args:
            config_file: Path to JSON configuration file

        Returns:
            Configuration dictionary
        """
        import json
        default_config = {
            "detection": {
                "confidence_threshold": 0.5,
                "enable_ocr": True,
                "enable_tracking": True,
                "model_path": None
            },
            "violations": {
                "red_light_grace_period": 2.0,
                "stop_sign_duration": 2.0,
                "speed_tolerance": 5
            },
            "display": {
                "max_display_width": 800,
                "show_confidence": True,
                "show_labels": True,
                "show_license_plates": True
            },
            "performance": {
                "max_history_frames": 1000,
                "cleanup_interval": 3600
            }
        }

        if config_file and os.path.exists(config_file):
            try:
                with open(config_file, 'r') as f:
                    loaded_config = json.load(f)
                # Merge with defaults (preserving loaded values)
                for section in default_config:
                    if section in loaded_config:
                        default_config[section].update(loaded_config[section])
            except Exception as e:
                print(f"Error loading config: {e}")

        return default_config

    def _initialize_models(self):
        """Initialize OpenVINO detection and violation models."""
        try:
            # Find the best model path
            model_path = self.config["detection"].get("model_path")
            if not model_path or not os.path.exists(model_path):
                model_path = self._find_best_model_path()
                if not model_path:
                    print("❌ No model found")
                    return

            # Initialize detector
            print(f"✅ Initializing OpenVINO detector with model: {model_path}")
            device = self.config["detection"].get("device", "AUTO")
            print(f"✅ Using inference device: {device}")
            self.detector = OpenVINOVehicleDetector(
                model_path=model_path,
                device=device,
                confidence_threshold=self.config["detection"]["confidence_threshold"]
            )

            # Use only RedLightViolationPipeline for violation/crosswalk/traffic light logic
            self.violation_pipeline = RedLightViolationPipeline(debug=True)
            print("✅ Red light violation pipeline initialized (all other violation logic removed)")

            # Initialize tracker if enabled
            if self.config["detection"]["enable_tracking"]:
                try:
                    from deep_sort_realtime.deepsort_tracker import DeepSort

                    # Use the optimized OpenVINO embedder if available
                    use_optimized_embedder = True
                    embedder = None

                    if use_optimized_embedder:
                        try:
                            # Try importing our custom OpenVINO embedder
                            from utils.embedder_openvino import OpenVINOEmbedder
                            print(f"✅ Initializing optimized OpenVINO embedder on {device}")

                            # Set model_path explicitly to use the user-supplied model
                            script_dir = Path(__file__).parent.parent
                            model_file_path = None

                            # Try the copy version first (it may be modified for compatibility)
                            copy_model_path = script_dir / "mobilenetv2 copy.xml"
                            original_model_path = script_dir / "mobilenetv2.xml"

                            if copy_model_path.exists():
                                model_file_path = str(copy_model_path)
                                print(f"✅ Using user-supplied model: {model_file_path}")
                            elif original_model_path.exists():
                                model_file_path = str(original_model_path)
                                print(f"✅ Using user-supplied model: {model_file_path}")

                            embedder = OpenVINOEmbedder(
                                model_path=model_file_path,
                                device=device,
                                half=True  # Use FP16 for better performance
                            )
                        except Exception as emb_err:
                            print(f"⚠️ OpenVINO embedder failed: {emb_err}, falling back to default")

                    # Initialize the tracker, with or without the custom embedder
                    if embedder is None:
                        print("⚠️ No embedder available, using DeepSORT with default tracking")
                    else:
                        print("✅ Initializing DeepSORT with custom embedder")

                    self.tracker = DeepSort(
                        max_age=30,     # Frames to keep a lost track alive
                        n_init=3,       # Detections required before a track is confirmed
                        nn_budget=100,  # Max appearance samples kept per track
                        embedder=embedder
                    )
                    print("✅ DeepSORT tracker initialized")
                except ImportError:
                    print("⚠️ DeepSORT not available")
                    self.tracker = None

            print("✅ Models initialized successfully")

        except Exception as e:
            print(f"❌ Error initializing models: {e}")
            import traceback
            traceback.print_exc()
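The device string handed to the detector and embedder ("CPU", "GPU", "AUTO") follows OpenVINO's device naming. A quick way to check which names are valid on a given machine, using the standard OpenVINO runtime API:

from openvino.runtime import Core

core = Core()
print(core.available_devices)  # e.g. ['CPU', 'GPU'] depending on the machine
# "AUTO" is a virtual device that lets OpenVINO pick among these at load time.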
    def _find_best_model_path(self, base_model_name: str = None) -> Optional[str]:
        """
        Find the best available model file in the workspace.

        Args:
            base_model_name: Base model name without extension

        Returns:
            Path to the model file, or None if nothing was found
        """
        # Select a model based on device if base_model_name is not specified
        if base_model_name is None:
            device = self.config["detection"].get("device", "AUTO")
            if device == "CPU" or device == "AUTO":
                # Use yolo11n for CPU - a faster, lighter model
                base_model_name = "yolo11n"
                print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)")
            else:
                # Use yolo11x for GPU - a larger model with better accuracy
                base_model_name = "yolo11x"
                print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)")

        # Check whether openvino_models exists in the current working directory
        cwd_openvino_dir = Path.cwd() / "openvino_models"
        if cwd_openvino_dir.exists():
            direct_path = cwd_openvino_dir / f"{base_model_name}.xml"
            if direct_path.exists():
                print(f"✅ Found model directly in CWD: {direct_path}")
                return str(direct_path.absolute())

        # Check the hard-coded absolute path to openvino_models (machine-specific fallback)
        absolute_openvino_dir = Path("D:/Downloads/finale6/khatam/openvino_models")
        if absolute_openvino_dir.exists():
            direct_path = absolute_openvino_dir / f"{base_model_name}.xml"
            if direct_path.exists():
                print(f"✅ Found model at absolute path: {direct_path}")
                return str(direct_path.absolute())

        # Try relative to this file's parent directories
        openvino_models_dir = Path(__file__).parent.parent.parent / "openvino_models"
        direct_path = openvino_models_dir / f"{base_model_name}.xml"
        if direct_path.exists():
            print(f"✅ Found model in app directory: {direct_path}")
            return str(direct_path.absolute())

        # Check for the model in a subfolder within openvino_models
        subfolder_path = openvino_models_dir / f"{base_model_name}_openvino_model" / f"{base_model_name}.xml"
        if subfolder_path.exists():
            print(f"✅ Found model in subfolder: {subfolder_path}")
            return str(subfolder_path.absolute())

        # Try other common locations
        search_dirs = [
            ".",
            "..",
            "../models",
            "../rcb",
            "../openvino_models",
            f"../{base_model_name}_openvino_model",
            "../..",                  # Project root
            "../../openvino_models",  # Project root / openvino_models
        ]

        model_extensions = [
            (f"{base_model_name}.xml", "OpenVINO IR direct"),
            (f"{base_model_name}_openvino_model/{base_model_name}.xml", "OpenVINO IR"),
            (f"{base_model_name}.pt", "PyTorch"),
        ]

        for search_dir in search_dirs:
            search_path = Path(__file__).parent.parent / search_dir
            if not search_path.exists():
                continue

            for model_file, model_type in model_extensions:
                model_path = search_path / model_file
                if model_path.exists():
                    print(f"✅ Found {model_type} model: {model_path}")
                    return str(model_path.absolute())

        print(f"❌ No model found for {base_model_name}")
        return None
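OpenVINO IR models come as a pair of files, <name>.xml (topology) and <name>.bin (weights); the search above only checks for the .xml, relying on the runtime to find the matching .bin beside it. A sketch of how the resolved path is presumably consumed (the Core calls are the standard OpenVINO runtime API; the wrapping detector class is this repo's own):

from openvino.runtime import Core

core = Core()
model = core.read_model("openvino_models/yolo11n.xml")  # the .bin is located automatically
compiled = core.compile_model(model, "AUTO")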
    def detect(self, frame: np.ndarray) -> List[Dict]:
        """
        Detect objects in a frame.

        Args:
            frame: Input video frame

        Returns:
            List of detection dictionaries
        """
        if self.detector is None:
            print("WARNING: No detector available")
            return []
        try:
            # Clamp the confidence threshold to at least 0.3 (raises very low settings)
            conf_threshold = max(0.3, self.config["detection"].get("confidence_threshold", 0.5))
            detections = self.detector.detect_vehicles(frame, conf_threshold=conf_threshold)

            # Debug output
            if detections:
                print(f"DEBUG: Detected {len(detections)} objects: " +
                      ", ".join([f"{d['class_name']} ({d['confidence']:.2f})" for d in detections[:3]]))

                # Print bounding box coordinates of the first detection
                print(f"DEBUG: First detection bbox: {detections[0]['bbox']}")
            else:
                print("DEBUG: No detections in this frame")

            return detections
        except Exception as e:
            print(f"❌ Detection error: {e}")
            import traceback
            traceback.print_exc()
            return []
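Downstream code (tracking, the red light detector, the analytics controller) relies on a consistent detection dictionary shape. Based on the keys accessed throughout this diff, each entry looks roughly like the sketch below; any extra keys depend on OpenVINOVehicleDetector:

detection = {
    'bbox': [120, 340, 260, 460],  # [x1, y1, x2, y2] pixel corners
    'class_name': 'car',
    'class_id': 2,                 # COCO class id
    'confidence': 0.87,
    'track_id': 17,                # added later by update_tracking, when matched
}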
    def update_tracking(self, detections: List[Dict], frame: np.ndarray) -> List[Dict]:
        """
        Update tracking information for detections.

        Args:
            detections: List of detections
            frame: Current video frame

        Returns:
            Updated list of detections with tracking info
        """
        if not self.tracker or not detections:
            return detections

        try:
            # Format detections for DeepSORT: ([x, y, w, h], confidence, class_name)
            tracker_dets = []
            for det in detections:
                if 'bbox' not in det:
                    continue

                bbox = det['bbox']
                if len(bbox) < 4:
                    continue

                x1, y1, x2, y2 = bbox
                w = x2 - x1
                h = y2 - y1

                if w <= 0 or h <= 0:
                    continue

                conf = det.get('confidence', 0.0)
                class_name = det.get('class_name', 'unknown')
                tracker_dets.append(([x1, y1, w, h], conf, class_name))

            # Update tracks
            if tracker_dets:
                tracks = self.tracker.update_tracks(tracker_dets, frame=frame)

                # Associate confirmed tracks with detections via IoU
                for track in tracks:
                    if not track.is_confirmed():
                        continue

                    track_id = track.track_id
                    ltrb = track.to_ltrb()

                    for det in detections:
                        if 'bbox' not in det:
                            continue

                        bbox = det['bbox']
                        if len(bbox) < 4:
                            continue

                        dx1, dy1, dx2, dy2 = bbox
                        iou = bbox_iou((dx1, dy1, dx2, dy2), tuple(map(int, ltrb)))

                        if iou > 0.5:
                            det['track_id'] = track_id
                            break
            return detections

        except Exception as e:
            print(f"❌ Tracking error: {e}")
            return detections
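bbox_iou is called above but not defined in this file; it is presumably imported from a utility module elsewhere in the repo. A minimal reference implementation over (x1, y1, x2, y2) boxes, for orientation:

def bbox_iou(box_a, box_b):
    # Intersection-over-union of two (x1, y1, x2, y2) boxes
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    # Intersection rectangle (empty if the boxes do not overlap)
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax2, bx2), min(ay2, by2)
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    if inter == 0:
        return 0.0
    area_a = (ax2 - ax1) * (ay2 - ay1)
    area_b = (bx2 - bx1) * (by2 - by1)
    return inter / float(area_a + area_b - inter)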
    def update_config(self, new_config: Dict):
        """
        Update configuration parameters.

        Args:
            new_config: New configuration dictionary
        """
        if not new_config:
            return

        # Store the old device setting to check whether it changed
        old_device = self.config["detection"].get("device", "AUTO") if "detection" in self.config else "AUTO"

        # Update configuration
        for section in new_config:
            if section in self.config:
                self.config[section].update(new_config[section])
            else:
                self.config[section] = new_config[section]

        # If the device changed, the models must be reinitialized
        new_device = self.config["detection"].get("device", "AUTO")
        if old_device != new_device:
            print(f"📢 Device changed from {old_device} to {new_device}, reinitializing models...")
            self._initialize_models()
            return

        # Otherwise just update the detector's confidence threshold
        if self.detector:
            conf_thres = self.config["detection"].get("confidence_threshold", 0.5)
            self.detector.conf_thres = conf_thres
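Only a device change triggers a full model reload; anything else is applied in place. A hypothetical pair of calls illustrating the two paths:

model_manager.update_config({"detection": {"device": "GPU"}})
# -> prints the device-change notice and re-runs _initialize_models()

model_manager.update_config({"detection": {"confidence_threshold": 0.4}})
# -> same device, so only detector.conf_thres is updated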
41
qt_app_pyside1/controllers/performance_overlay.py
Normal file
@@ -0,0 +1,41 @@
from PySide6.QtWidgets import QWidget, QLabel, QVBoxLayout
from PySide6.QtCore import QTimer, Qt
import psutil

class PerformanceOverlay(QWidget):
    def __init__(self):
        super().__init__()
        # Make the overlay click-through (0x00080000 == Qt.WindowTransparentForInput;
        # the original comment labeled this Qt.ToolTip, which is a different flag)
        self.setWindowFlags(self.windowFlags() | Qt.WindowTransparentForInput)
        layout = QVBoxLayout(self)
        self.cpu_label = QLabel("CPU: --%")
        self.ram_label = QLabel("RAM: --%")
        self.fps_label = QLabel("FPS: --")
        self.infer_label = QLabel("Inference: -- ms")
        layout.addWidget(self.cpu_label)
        layout.addWidget(self.ram_label)
        layout.addWidget(self.fps_label)
        layout.addWidget(self.infer_label)
        self.fps = None
        self.infer_time = None
        self.update_stats()
        # Timer for auto-refresh
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_stats)
        self.timer.start(1000)  # Update every second

    def update_stats(self):
        self.cpu_label.setText(f"CPU: {psutil.cpu_percent()}%")
        self.ram_label.setText(f"RAM: {psutil.virtual_memory().percent}%")
        if self.fps is not None:
            self.fps_label.setText(f"FPS: {self.fps:.1f}")
        else:
            self.fps_label.setText("FPS: --")
        if self.infer_time is not None:
            self.infer_label.setText(f"Inference: {self.infer_time:.1f} ms")
        else:
            self.infer_label.setText("Inference: -- ms")

    def set_video_stats(self, fps, inference_time):
        self.fps = fps
        self.infer_time = inference_time
        self.update_stats()
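The overlay is passive: something must push FPS and inference numbers into it. A plausible wiring, assuming a VideoController like the one later in this diff (the lambda signature matches its raw_frame_ready signal; passing None for inference time simply shows "--"):

overlay = PerformanceOverlay()
overlay.show()

# fps arrives as the 4th element of raw_frame_ready; substitute a real
# detection time in ms if one is tracked separately
video_controller.raw_frame_ready.connect(
    lambda frame, dets, viols, fps: overlay.set_video_stats(fps, None)
)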
306
qt_app_pyside1/controllers/red_light_violation_detector.py
Normal file
@@ -0,0 +1,306 @@
"""
Red Light Violation Detector for traffic monitoring in the Qt application.
"""

import cv2
import numpy as np
import time
from typing import Dict, List, Tuple, Optional, Any
from collections import deque
import datetime
import os

# Import crosswalk utilities
from utils.crosswalk_utils2 import (
    detect_crosswalk_and_violation_line,
    draw_violation_line
)
# Import traffic light utilities, with no-op fallbacks if unavailable
try:
    from utils.traffic_light_utils import detect_traffic_light_color, draw_traffic_light_status
    print("✅ Imported traffic light utilities in violation detector")
except ImportError:
    def detect_traffic_light_color(frame, bbox):
        return {"color": "unknown", "confidence": 0.0}
    def draw_traffic_light_status(frame, bbox, color):
        return frame
    print("⚠️ Failed to import traffic light utilities")

class RedLightViolationDetector:
    """
    Detect red light violations based on traffic light status and vehicle positions.

    This class integrates crosswalk/stop line detection with traffic light color
    detection to identify vehicles that cross the line during a red light.
    """

    def __init__(self):
        """Initialize the detector with default settings."""
        # Detection state
        self.violation_line_y = None
        self.detection_enabled = True
        self.detection_mode = "auto"  # "auto", "crosswalk", "stopline"

        # Track vehicles for violation detection
        self.tracked_vehicles = {}  # id -> {position_history, violation_status}
        self.violations = []

        # Store frames for snapshots/video clips
        self.violation_buffer = deque(maxlen=30)  # ~1 second of frames

        # Settings
        self.confidence_threshold = 0.5
        self.save_snapshots = True
        self.snapshot_dir = os.path.join(os.path.expanduser("~"), "Documents", "TrafficViolations")
        os.makedirs(self.snapshot_dir, exist_ok=True)

    def detect_violation_line(self, frame: np.ndarray, traffic_light_bbox: Optional[List[int]] = None) -> int:
        """
        Detect the violation line in the frame using crosswalk or stop line detection.

        Args:
            frame: Input video frame
            traffic_light_bbox: Optional traffic light bounding box for context

        Returns:
            Y-coordinate of the violation line
        """
        frame_height = frame.shape[0]

        try:
            # Try to detect a crosswalk first if mode is auto or crosswalk
            if self.detection_mode in ["auto", "crosswalk"]:
                result_frame, crosswalk_bbox, violation_line_y, crosswalk_debug = detect_crosswalk_and_violation_line(frame)
                print(f"Crosswalk detection result: bbox={crosswalk_bbox}, vline_y={violation_line_y}")
                frame = result_frame  # Use the frame with overlays for further processing or display
                if crosswalk_bbox:
                    # Use the top of the crosswalk as the violation line
                    self.violation_line_y = crosswalk_bbox[1] - 10  # 10px before the crosswalk
                    self.detection_mode = "crosswalk"  # Auto mode found a crosswalk, so lock onto it
                    print(f"✅ Using crosswalk for violation line at y={self.violation_line_y}")
                    return self.violation_line_y

            # If a traffic light is detected, position the line below it
            if traffic_light_bbox:
                x1, y1, x2, y2 = traffic_light_bbox
                # Place the line 15% of the frame height below the traffic light
                proposed_y = y2 + int(frame_height * 0.15)
                # Don't place it too low in the frame
                if proposed_y < frame_height * 0.85:
                    self.violation_line_y = proposed_y
                    print(f"✅ Using traffic light position for violation line at y={self.violation_line_y}")
                    return self.violation_line_y

            # If nothing was detected, default to 75% of the frame height
            self.violation_line_y = int(frame_height * 0.75)
            print(f"ℹ️ Using default violation line at y={self.violation_line_y}")

            return self.violation_line_y

        except Exception as e:
            print(f"❌ Error in detect_violation_line: {e}")
            # Fallback
            return int(frame_height * 0.75)

    def process_frame(self, frame: np.ndarray, detections: List[Dict],
                      current_traffic_light_color) -> Tuple[np.ndarray, List[Dict]]:
        """
        Process a frame to detect red light violations.

        Args:
            frame: Input video frame
            detections: List of detection dictionaries with 'class_name', 'bbox', etc.
            current_traffic_light_color: Current traffic light color as a string
                ('red', 'yellow', 'green', 'unknown') or a dict with 'color' and
                'confidence' keys

        Returns:
            Tuple of (annotated frame, list of violation events)
        """
        if not self.detection_enabled:
            return frame, []

        # Store the original frame in the violation buffer
        self.violation_buffer.append(frame.copy())

        # Annotate a copy of the frame for visualization
        annotated_frame = frame.copy()
        # Get the traffic light position if available
        traffic_light_bbox = None
        for det in detections:
            # Check for both 'traffic light' and class_id 9 (COCO class for traffic light)
            if det.get('class_name') == 'traffic light' or det.get('class_id') == 9:
                traffic_light_bbox = det.get('bbox')
                print(f"Found traffic light with bbox: {traffic_light_bbox}")
                break

        # Detect the violation line if not already detected
        if self.violation_line_y is None or self.violation_line_y <= 0:
            print(f"Detecting violation line with traffic light bbox: {traffic_light_bbox}")
            try:
                self.violation_line_y = self.detect_violation_line(frame, traffic_light_bbox)
                print(f"Successfully detected violation line at y={self.violation_line_y}")
            except Exception as e:
                print(f"❌ Error detecting violation line: {e}")
                # Fall back to the default position
                self.violation_line_y = int(frame.shape[0] * 0.75)
                print(f"Using default violation line at y={self.violation_line_y}")

        # Draw the violation line with enhanced visualization.
        # Handle both string and dictionary formats for compatibility
        # (the original called .get() unconditionally, which crashes on strings).
        if isinstance(current_traffic_light_color, dict):
            is_red = current_traffic_light_color.get("color") == "red"
            confidence = current_traffic_light_color.get("confidence", 0.0)
            confidence_text = f" (Conf: {confidence:.2f})"
            color_label = current_traffic_light_color.get("color", "unknown")
        else:
            is_red = current_traffic_light_color == "red"
            confidence_text = ""
            color_label = current_traffic_light_color

        line_color = (0, 0, 255) if is_red else (0, 255, 0)
        annotated_frame = draw_violation_line(
            annotated_frame,
            self.violation_line_y,
            line_color,
            f"VIOLATION LINE - {color_label.upper()}{confidence_text}"
        )

        # --- DEBUG: always draw a hardcoded violation line for testing ---
        if self.violation_line_y is None or self.violation_line_y <= 0:
            frame_height = frame.shape[0]
            self.violation_line_y = int(frame_height * 0.75)  # e.g. 75% of frame height
            print(f"[DEBUG] Drawing fallback violation line at y={self.violation_line_y}")
        cv2.line(annotated_frame, (0, self.violation_line_y), (frame.shape[1], self.violation_line_y), (0, 0, 255), 3)

        # Track vehicles and check for violations
        violations_this_frame = []

        # Process each detection
        for detection in detections:
            class_name = detection.get('class_name')
            confidence = detection.get('confidence', 0.0)
            bbox = detection.get('bbox')
            track_id = detection.get('track_id', -1)
            # Only process vehicles with sufficient confidence.
            # Check both class_name and class_id for better compatibility.
            is_vehicle = (class_name in ['car', 'truck', 'bus', 'motorcycle'] or
                          detection.get('class_id') in [2, 3, 5, 7])  # COCO vehicle classes

            if (is_vehicle and
                    confidence >= self.confidence_threshold and
                    bbox is not None):
                # Use the tracking id, or generate a temporary one if unavailable
                if track_id < 0:
                    # Temporary ID based on position and size
                    x1, y1, x2, y2 = bbox
                    track_id = f"temp_{int((x1+x2)/2)}_{int((y1+y2)/2)}_{int((x2-x1)*(y2-y1))}"

                # Initialize tracking state if this is a new vehicle
                if track_id not in self.tracked_vehicles:
                    print(f"🚗 New vehicle detected with ID: {track_id}")
                    self.tracked_vehicles[track_id] = {
                        'positions': deque(maxlen=30),  # ~1 second of positions
                        'violated': False,
                        'first_detected': time.time()
                    }

                # Update position history
                vehicle_data = self.tracked_vehicles[track_id]
                vehicle_data['positions'].append((bbox, time.time()))

                # Check for a violation only while the light is red.
                # Handle both string and dictionary formats; use a separate
                # variable so the detection confidence is not clobbered.
                if isinstance(current_traffic_light_color, dict):
                    is_red = current_traffic_light_color.get("color") == "red"
                    tl_confidence = current_traffic_light_color.get("confidence", 0.0)
                    # Only treat the light as red if confidence is above threshold
                    is_red = is_red and tl_confidence >= 0.4
                else:
                    is_red = current_traffic_light_color == "red"

                if (is_red and
                        not vehicle_data['violated'] and
                        check_vehicle_violation(bbox, self.violation_line_y)):

                    # Mark as violated
                    vehicle_data['violated'] = True

                    # Create the violation record
                    violation = {
                        'id': len(self.violations) + 1,
                        'track_id': track_id,
                        'timestamp': datetime.datetime.now(),
                        'vehicle_type': class_name,
                        'confidence': detection.get('confidence', 0.0),
                        'bbox': bbox,
                        'violation_type': 'red_light',
                        'snapshot_path': None
                    }

                    # Add traffic light information if available
                    if isinstance(current_traffic_light_color, dict):
                        violation['traffic_light'] = {
                            'color': current_traffic_light_color.get('color', 'red'),
                            'confidence': current_traffic_light_color.get('confidence', 0.0)
                        }
                    else:
                        violation['traffic_light'] = {
                            'color': current_traffic_light_color,
                            'confidence': 1.0
                        }

                    # Save a snapshot if enabled
                    if self.save_snapshots:
                        snapshot_path = os.path.join(
                            self.snapshot_dir,
                            f"violation_{violation['id']}_{int(time.time())}.jpg"
                        )
                        cv2.imwrite(snapshot_path, frame)
                        violation['snapshot_path'] = snapshot_path

                    # Record the violation
                    self.violations.append(violation)
                    violations_this_frame.append(violation)

                    # Draw the violation box
                    x1, y1, x2, y2 = bbox
                    cv2.rectangle(annotated_frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
                    cv2.putText(
                        annotated_frame,
                        f"RED LIGHT VIOLATION #{violation['id']}",
                        (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.7,
                        (0, 0, 255),
                        2
                    )

        # Clean up old tracked vehicles to prevent memory leaks
        current_time = time.time()
        old_ids = [tid for tid, data in self.tracked_vehicles.items()
                   if current_time - data['first_detected'] > 30]  # Remove after 30 seconds
        for tid in old_ids:
            del self.tracked_vehicles[tid]

        return annotated_frame, violations_this_frame

    def reset(self):
        """Reset the detector state (the violation history is kept)."""
        self.violation_line_y = None
        self.tracked_vehicles = {}

    def get_violations(self) -> List[Dict]:
        """
        Get all detected violations.

        Returns:
            List of violation dictionaries
        """
        return self.violations

    def clear_violations(self):
        """Clear all violation records."""
        self.violations = []
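process_frame calls check_vehicle_violation, which is not defined in this file; it is presumably imported from the crosswalk utilities. A minimal sketch of the check, under the assumption that a vehicle violates once the bottom edge of its box crosses the line:

def check_vehicle_violation(bbox, violation_line_y) -> bool:
    # Hypothetical stand-in for the helper used above: a vehicle counts as
    # violating once the bottom of its bounding box is at or below the line.
    x1, y1, x2, y2 = bbox
    return y2 >= violation_line_y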
9595
qt_app_pyside1/controllers/video_controller.py
Normal file
File diff suppressed because it is too large
384
qt_app_pyside1/controllers/video_controller.py.new
Normal file
@@ -0,0 +1,384 @@
from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer
from PySide6.QtGui import QImage, QPixmap
import cv2
import time
import numpy as np
from collections import deque
from typing import Dict, List, Optional
import os
import sys

# Add parent directory to path for imports
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Import utilities
from utils.annotation_utils import (
    draw_detections,
    draw_violations,
    draw_performance_metrics,
    resize_frame_for_display,
    convert_cv_to_qimage,
    convert_cv_to_pixmap
)

class VideoController(QObject):
    frame_ready = Signal(object, object, object, dict)       # QPixmap, detections, violations, metrics
    raw_frame_ready = Signal(np.ndarray, list, list, float)  # frame, detections, violations, fps

    def __init__(self, model_manager=None):
        """
        Initialize the video controller.

        Args:
            model_manager: Model manager instance for detection and violation
        """
        super().__init__()
        self.model_manager = model_manager
        self.source = 0  # Default camera source
        self._running = False
        self.frame_count = 0
        self.start_time = 0
        self.source_fps = 0
        self.actual_fps = 0
        self.processing_times = deque(maxlen=30)
        self.cap = None  # VideoCapture object

        # Configure the worker thread
        self.thread = QThread()
        self.moveToThread(self.thread)
        self.thread.started.connect(self._run)

        # Thread synchronization and performance measurement
        self.mutex = QMutex()
        self.condition = QWaitCondition()
        self.performance_metrics = {
            'FPS': 0.0,
            'Detection (ms)': 0.0,
            'Violation (ms)': 0.0,
            'Total (ms)': 0.0
        }

        # Render timer drives the UI-facing frame processing
        self.render_timer = QTimer()
        self.render_timer.timeout.connect(self._process_frame)

        # Frame buffer
        self.current_frame = None
        self.current_detections = []
        self.current_violations = []

        # Debug counter
        self.debug_counter = 0

    def set_source(self, source):
        """Set video source (file path, camera index, or URL)."""
        print(f"DEBUG: VideoController.set_source called with: {source} (type: {type(source)})")

        was_running = self._running
        if self._running:
            self.stop()

        # Critical: make sure the source is properly set
        if source is None:
            print("WARNING: Received None source, defaulting to camera 0")
            self.source = 0
        elif isinstance(source, str) and source.strip():
            # File path - verify the file exists
            if os.path.exists(source):
                self.source = source
                print(f"DEBUG: VideoController source set to file: {self.source}")
            elif source.isdigit():
                # Digit string: convert to an integer camera index
                self.source = int(source)
                print(f"DEBUG: VideoController source set to camera index: {self.source}")
            else:
                # Treat as URL or special device string
                self.source = source
                print(f"DEBUG: VideoController source set to URL/device: {self.source}")
        elif isinstance(source, int):
            # Camera index
            self.source = source
            print(f"DEBUG: VideoController source set to camera index: {self.source}")
        else:
            print(f"WARNING: Unrecognized source type: {type(source)}, defaulting to camera 0")
            self.source = 0

        # Get properties of the source (fps, dimensions, etc.)
        self._get_source_properties()

        if was_running:
            self.start()

    def _get_source_properties(self):
        """Get properties of the video source."""
        try:
            cap = cv2.VideoCapture(self.source)
            if cap.isOpened():
                self.source_fps = cap.get(cv2.CAP_PROP_FPS)
                if self.source_fps <= 0:
                    self.source_fps = 30.0  # Default if undetectable

                self.frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                self.frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                # Note: frame_count is reused as a processed-frame counter in start()
                self.frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                cap.release()

                print(f"Video source: {self.frame_width}x{self.frame_height}, {self.source_fps} FPS")
            else:
                print("Failed to open video source")
        except Exception as e:
            print(f"Error getting source properties: {e}")

    def start(self):
        """Start video processing."""
        if not self._running:
            self._running = True
            self.start_time = time.time()
            self.frame_count = 0
            self.debug_counter = 0
            print("DEBUG: Starting video processing thread")

            # Start the processing thread
            if not self.thread.isRunning():
                self.thread.start()

            # Start the render timer at a ~60 FPS interval (16 ms)
            self.render_timer.start(16)
            print("DEBUG: Render timer started")

    def stop(self):
        """Stop video processing."""
        if self._running:
            print("DEBUG: Stopping video processing")
            self._running = False
            self.render_timer.stop()

            # Properly terminate the thread
            self.thread.quit()
            if not self.thread.wait(3000):  # Wait 3 seconds max
                self.thread.terminate()
                print("WARNING: Thread termination forced")

            # Close the capture if it exists
            if self.cap and self.cap.isOpened():
                self.cap.release()
                self.cap = None

            # Clear the current frame
            self.mutex.lock()
            self.current_frame = None
            self.mutex.unlock()
            print("DEBUG: Video processing stopped")

    def capture_snapshot(self) -> Optional[np.ndarray]:
        """Return a copy of the current frame, or None if no frame is available."""
        if self.current_frame is not None:
            return self.current_frame.copy()
        return None

    def _run(self):
        """Main processing loop (runs in the worker thread)."""
        try:
            print(f"DEBUG: Opening video source: {self.source} (type: {type(self.source)})")

            # Initialize the capture
            self.cap = None

            # Handle different source types
            if isinstance(self.source, str) and os.path.exists(self.source):
                # A valid file path
                print(f"DEBUG: Opening video file: {self.source}")
                self.cap = cv2.VideoCapture(self.source)

                if not self.cap.isOpened():
                    print(f"ERROR: Could not open video file: {self.source}")
                    return

            elif isinstance(self.source, int) or (isinstance(self.source, str) and self.source.isdigit()):
                # A camera index
                camera_idx = int(self.source) if isinstance(self.source, str) else self.source
                print(f"DEBUG: Opening camera: {camera_idx}")
                self.cap = cv2.VideoCapture(camera_idx)

                # Retry a few times (cameras sometimes take a moment to open)
                retry_count = 0
                while not self.cap.isOpened() and retry_count < 3:
                    print(f"Camera not ready, retrying ({retry_count+1}/3)...")
                    time.sleep(1)
                    self.cap.release()
                    self.cap = cv2.VideoCapture(camera_idx)
                    retry_count += 1

                if not self.cap.isOpened():
                    print(f"ERROR: Could not open camera {camera_idx} after {retry_count} attempts")
                    return
            else:
                # Try as a string source (URL or device path)
                print(f"DEBUG: Opening source as string: {self.source}")
                self.cap = cv2.VideoCapture(str(self.source))

                if not self.cap.isOpened():
                    print(f"ERROR: Could not open source: {self.source}")
                    return

            # Final sanity check
            if not self.cap or not self.cap.isOpened():
                print(f"ERROR: Could not open video source {self.source}")
                return

            # Configure frame timing based on source FPS
            frame_time = 1.0 / self.source_fps if self.source_fps > 0 else 0.033
            prev_time = time.time()

            print(f"SUCCESS: Video source opened: {self.source}")
            print(f"Source info - FPS: {self.source_fps}, Size: {self.frame_width}x{self.frame_height}")

            # Main processing loop
            while self._running and self.cap.isOpened():
                ret, frame = self.cap.read()
                if not ret:
                    print("End of video or read error")
                    break

                # Detection and violation processing
                process_start = time.time()

                # Detections
                detection_start = time.time()
                detections = []
                if self.model_manager:
                    detections = self.model_manager.detect(frame)
                detection_time = (time.time() - detection_start) * 1000

                # Violation detection is disabled
                violation_start = time.time()
                violations = []
                # if self.model_manager and detections:
                #     violations = self.model_manager.detect_violations(
                #         detections, frame, time.time()
                #     )
                violation_time = (time.time() - violation_start) * 1000

                # Update tracking if available
                if self.model_manager:
                    detections = self.model_manager.update_tracking(detections, frame)

                # Timing metrics
                process_time = (time.time() - process_start) * 1000
                self.processing_times.append(process_time)

                # Update FPS
                now = time.time()
                self.frame_count += 1
                elapsed = now - self.start_time
                if elapsed > 0:
                    self.actual_fps = self.frame_count / elapsed

                fps_smoothed = 1.0 / (now - prev_time) if now > prev_time else 0
                prev_time = now

                # Update metrics
                self.performance_metrics = {
                    'FPS': f"{fps_smoothed:.1f}",
                    'Detection (ms)': f"{detection_time:.1f}",
                    'Violation (ms)': f"{violation_time:.1f}",
                    'Total (ms)': f"{process_time:.1f}"
                }

                # Store current frame data (thread-safe)
                self.mutex.lock()
                self.current_frame = frame.copy()
                self.current_detections = detections
                self.current_violations = violations
                self.mutex.unlock()

                # Signal for raw data subscribers
                self.raw_frame_ready.emit(frame.copy(), detections, violations, fps_smoothed)

                # Throttle file sources to their native frame rate
                if isinstance(self.source, str) and self.source_fps > 0:
                    frame_duration = time.time() - process_start
                    if frame_duration < frame_time:
                        time.sleep(frame_time - frame_duration)

            if self.cap:
                self.cap.release()
                self.cap = None

        except Exception as e:
            print(f"Video processing error: {e}")
            import traceback
            traceback.print_exc()

        finally:
            self._running = False
            if self.cap and self.cap.isOpened():
                self.cap.release()
                self.cap = None

    def _process_frame(self):
        """Process the current frame for UI rendering (called by the render timer)."""
        if not self._running:
            return

        # Debug counter
        self.debug_counter += 1
        if self.debug_counter % 30 == 0:  # Print every ~30 frames
            print(f"DEBUG: Frame processing iteration: {self.debug_counter}")

        # Get frame data safely
        self.mutex.lock()
        frame = self.current_frame.copy() if self.current_frame is not None else None
        detections = self.current_detections.copy() if self.current_detections else []
        violations = self.current_violations.copy() if self.current_violations else []
        metrics = self.performance_metrics.copy()
        self.mutex.unlock()

        if frame is None:
            print("DEBUG: _process_frame skipped - no frame available")
            return

        try:
            # Annotate the frame
            annotated_frame = frame.copy()
            if detections:
                annotated_frame = draw_detections(annotated_frame, detections, True, True)

            # Draw metrics
            annotated_frame = draw_performance_metrics(annotated_frame, metrics)

            # Resize for display
            display_frame = resize_frame_for_display(annotated_frame)

            # Convert BGR -> RGB for Qt
            rgb_image = cv2.cvtColor(display_frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb_image.shape
            bytes_per_line = ch * w

            # Create QImage - critical: .copy() so the pixel data outlives rgb_image
            q_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888).copy()

            # Convert to pixmap
            pixmap = QPixmap.fromImage(q_image)

            # Emit the processed frame
            if not pixmap.isNull():
                print(f"DEBUG: Emitting pixmap: {pixmap.width()}x{pixmap.height()}")
                self.frame_ready.emit(pixmap, detections, violations, metrics)
            else:
                print("ERROR: Created QPixmap is null")

        except Exception as e:
            print(f"ERROR in _process_frame: {e}")
            import traceback
            traceback.print_exc()
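On the consuming side, frame_ready is designed to drop straight into a QLabel. A minimal sketch of a view slot (the widget and its name are hypothetical):

from PySide6.QtWidgets import QLabel

class VideoView(QLabel):
    def on_frame_ready(self, pixmap, detections, violations, metrics):
        # The pixmap is already annotated and display-sized
        self.setPixmap(pixmap)

# view = VideoView()
# controller.frame_ready.connect(view.on_frame_ready)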
3981
qt_app_pyside1/controllers/video_controller_finale.py
Normal file
File diff suppressed because it is too large
1673
qt_app_pyside1/controllers/video_controller_new.py
Normal file
File diff suppressed because it is too large