Clean push: Removed heavy files & added only latest snapshot
qt_app_pyside1/test_redlight_violation.py (new file, 265 lines)
@@ -0,0 +1,265 @@
"""
Red Light Violation Detection Test Script
"""

import cv2
import numpy as np
import os
import sys
import time
import argparse

# Add parent directory to path for imports
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)

# Import utilities for crosswalk detection
from qt_app_pyside.utils.crosswalk_utils import (
    detect_and_draw_crosswalk, # New advanced function with visualization
    detect_crosswalk,
    detect_stop_line,
    draw_violation_line,
    check_vehicle_violation
)

# Import traffic light utilities
from qt_app_pyside.utils.traffic_light_utils import detect_traffic_light_color, draw_traffic_light_status

def process_test_video(video_path):
    """
    Process a test video to demonstrate red light violation detection.

    Args:
        video_path: Path to the test video file
    """
    # Open the video file
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"Error: Could not open video file {video_path}")
        return

    # Get video properties
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    print(f"Video loaded: {width}x{height} @ {fps}fps")

    # Create output directory for results
    output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_results")
    os.makedirs(output_dir, exist_ok=True)

    # Create output video writer
    output_path = os.path.join(output_dir, "violation_detection_output.avi")
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
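    # Note: if the XVID codec is unavailable on this system, a commonly working
    # fallback is cv2.VideoWriter_fourcc(*'mp4v') with an .mp4 output path.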

    # Detection state
    frame_count = 0
    violation_line_y = None
    traffic_light_color = "unknown"
    tracked_vehicles = {}
    violations = []

    # Main processing loop
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Make a copy for annotation
        annotated_frame = frame.copy()

        # Every 50 frames, attempt to detect crosswalk/stop line
        if frame_count % 50 == 0 or violation_line_y is None:
            # Use advanced function that visualizes the crosswalk
            annotated_frame, crosswalk_bbox, crosswalk_contours = detect_and_draw_crosswalk(frame)
            if crosswalk_bbox:
                violation_line_y = crosswalk_bbox[1] - 10 # 10px before crosswalk
                print(f"Detected crosswalk at y={violation_line_y}")
            else:
                # Try to detect stop line
                stop_line_y = detect_stop_line(frame)
                if stop_line_y:
                    violation_line_y = stop_line_y - 10 # 10px before stop line
                    print(f"Detected stop line at y={violation_line_y}")

        # If still no violation line, use default
        if violation_line_y is None:
            violation_line_y = int(height * 0.75) # Default at 75% of height

        # Draw violation line (make it always thick, visible, and labeled)
        line_color = (0, 0, 255) if traffic_light_color == "red" else (0, 255, 0)
        annotated_frame = draw_violation_line(
            annotated_frame, violation_line_y, color=line_color,
            thickness=10, style='solid', label=f"Violation Line: y={violation_line_y}")
        print(f"[DEBUG] Violation line drawn at y={violation_line_y}, color={line_color}, thickness=10")

        # Demo traffic light detection
        # In a real app, you would get traffic light bbox from your detector
        # For this demo, we'll create a fake traffic light region in the corner

        # Create a demo traffic light bounding box (top-right corner)
        traffic_light_bbox = [width-100, 50, width-20, 200]

        # Every 10 frames, simulate traffic light detection
        # In a real app, you would detect the color from the video
        if frame_count % 10 == 0:
            # Alternate between colors for demonstration
            if traffic_light_color == "red":
                traffic_light_color = "green"
            elif traffic_light_color == "green":
                traffic_light_color = "yellow"
            elif traffic_light_color == "yellow":
                traffic_light_color = "red"
            else:
                traffic_light_color = "red" # Start with red

        # Draw a sample traffic light for visualization
        light_height = traffic_light_bbox[3] - traffic_light_bbox[1]
        light_width = traffic_light_bbox[2] - traffic_light_bbox[0]

        # Draw traffic light housing
        cv2.rectangle(annotated_frame,
                      (traffic_light_bbox[0], traffic_light_bbox[1]),
                      (traffic_light_bbox[2], traffic_light_bbox[3]),
                      (100, 100, 100), -1)

        # Draw the active light based on current color
        if traffic_light_color == "red":
            cv2.circle(annotated_frame,
                       (traffic_light_bbox[0] + light_width//2,
                        traffic_light_bbox[1] + light_height//4),
                       light_width//3, (0, 0, 255), -1)
        elif traffic_light_color == "yellow":
            cv2.circle(annotated_frame,
                       (traffic_light_bbox[0] + light_width//2,
                        traffic_light_bbox[1] + light_height//2),
                       light_width//3, (0, 255, 255), -1)
        elif traffic_light_color == "green":
            cv2.circle(annotated_frame,
                       (traffic_light_bbox[0] + light_width//2,
                        traffic_light_bbox[1] + 3*light_height//4),
                       light_width//3, (0, 255, 0), -1)

        # Use our improved function to visualize traffic light status
        annotated_frame = draw_traffic_light_status(annotated_frame, traffic_light_bbox, traffic_light_color)

        # Display traffic light color
        cv2.putText(
            annotated_frame,
            f"Traffic Light: {traffic_light_color.upper()}",
            (50, 50),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 0, 255) if traffic_light_color == "red" else
            (0, 255, 255) if traffic_light_color == "yellow" else
            (0, 255, 0),
            2
        )

        # Every 5 frames, simulate vehicle detection
        if frame_count % 5 == 0:
            # Simulate vehicle moving from top to bottom
            vehicle_y = int((frame_count / 500.0) * height)
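            # (With this formula the simulated vehicle sweeps the full frame height
            # over the first 500 frames, so it eventually crosses the violation line.)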
            vehicle_x = width // 2
            vehicle_width = 100
            vehicle_height = 80

            # Create bounding box [x1, y1, x2, y2]
            bbox = [
                vehicle_x - vehicle_width // 2,
                vehicle_y - vehicle_height // 2,
                vehicle_x + vehicle_width // 2,
                vehicle_y + vehicle_height // 2
            ]

            # Draw vehicle bbox
            cv2.rectangle(
                annotated_frame,
                (bbox[0], bbox[1]),
                (bbox[2], bbox[3]),
                (0, 255, 0),
                2
            )

            # Check for violation
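            # (check_vehicle_violation is assumed to return True once the vehicle
            # bounding box has crossed violation_line_y; it is defined in crosswalk_utils.)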
            if (traffic_light_color == "red" and
                    check_vehicle_violation(bbox, violation_line_y)):
                # Mark violation
                cv2.putText(
                    annotated_frame,
                    "RED LIGHT VIOLATION!",
                    (bbox[0], bbox[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.7,
                    (0, 0, 255),
                    2
                )

                # Re-draw vehicle bbox in red
                cv2.rectangle(
                    annotated_frame,
                    (bbox[0], bbox[1]),
                    (bbox[2], bbox[3]),
                    (0, 0, 255),
                    3
                )

                # Save violation frame
                violation_path = os.path.join(output_dir, f"violation_{len(violations)}.jpg")
                cv2.imwrite(violation_path, frame)
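                # Note: the raw frame is saved here as evidence; use annotated_frame
                # instead if the overlays should appear in the saved snapshot.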
                violations.append({
                    "frame": frame_count,
                    "bbox": bbox,
                    "path": violation_path
                })

                print(f"Violation detected at frame {frame_count}")

        # Write the frame to output video
        out.write(annotated_frame)

        # Display frame
        cv2.imshow('Red Light Violation Detection Test', annotated_frame)

        # Check for exit key (q)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        frame_count += 1

    # Clean up
    cap.release()
    out.release()
    cv2.destroyAllWindows()

    print(f"Processing complete. {len(violations)} violations detected.")
    print(f"Output video saved to: {output_path}")

if __name__ == "__main__":
    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='Test red light violation detection')
    parser.add_argument('--video', type=str, help='Path to test video file')
    args = parser.parse_args()

    # If a video path is provided, use it; otherwise fall back to a bundled sample
    video_path = args.video
    if not video_path or not os.path.exists(video_path):
        # Try to find a sample video in the workspace
        sample_paths = [
            "sample_data/traffic.mp4",
            "../sample_data/traffic.mp4",
            "test_videos/traffic_light.mp4",
            "../test_videos/traffic_light.mp4"
        ]

        for path in sample_paths:
            if os.path.exists(path):
                video_path = path
                break

        if not video_path:
            print("Error: No video file specified. Please provide a path with --video")
            sys.exit(1)

    process_test_video(video_path)