Clean push: Removed heavy files & added only latest snapshot
.dockerignore (new file, 28 lines)
@@ -0,0 +1,28 @@
__pycache__/
*.pyc
*.pyo
*.pyd
.Python
build/
dist/
.git/
*.db
*.log
*.pt
*.bin
*.onnx
*.xml
*.jpg
*.png
*.mp4
datasets/
DeepLabV3Plus-Pytorch/
qt_app_pyside1/build/
qt_app_pyside1/__pycache__/
qt_app_pyside1/*.pt
qt_app_pyside1/*.bin
qt_app_pyside1/*.onnx
qt_app_pyside1/*.xml
qt_app_pyside1/*.jpg
qt_app_pyside1/*.png
qt_app_pyside1/*.mp4
.gitattributes (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
*.bin filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.xml filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.exe filter=lfs diff=lfs merge=lfs -text
*.pkg filter=lfs diff=lfs merge=lfs -text
*.pyz filter=lfs diff=lfs merge=lfs -text
*.html filter=lfs diff=lfs merge=lfs -text
*.toc filter=lfs diff=lfs merge=lfs -text
.gitignore (vendored, new binary file; binary file not shown)
.vscode/tasks.json (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
{
    "version": "2.0.0",
    "tasks": [
        {
            "label": "Run Qt PySide6 App",
            "type": "shell",
            "command": "cd d:\\Downloads\\qt_app_pyside\\khatam\\qt_app_pyside && python run_app.py",
            "group": "test",
            "isBackground": false,
            "problemMatcher": []
        }
    ]
}
Dockerfile (new file, 37 lines)
@@ -0,0 +1,37 @@
FROM python:3.10-slim

# Install system dependencies for OpenCV, PySide6, OpenVINO, etc.
RUN apt-get update && apt-get install -y \
    ffmpeg \
    libgl1 \
    libegl1 \
    libglib2.0-0 \
    libsm6 \
    libxrender1 \
    libxext6 \
    xvfb \
    x11-apps \
    supervisor \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy requirements
COPY qt_app_pyside/requirements.txt ./requirements.txt

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy only the files and folders actually used by the main app
COPY qt_app_pyside/ ./qt_app_pyside/
COPY main.py ./main.py
COPY config.json ./config.json
COPY detection_openvino.py ./detection_openvino.py
COPY utils.py ./utils.py
COPY yolo11n.pt ./yolo11n.pt
COPY yolo11x.bin ./yolo11x.bin
COPY yolo11x.pt ./yolo11x.pt
COPY yolo11x.xml ./yolo11x.xml

# Set the entrypoint to the main app
CMD ["python", "qt_app_pyside/main.py"]
README.md (new file, 57 lines)
@@ -0,0 +1,57 @@
# **Traffic Intersection Monitoring System with OpenVINO**

This project implements a real-time traffic monitoring solution that detects vehicles, pedestrians, and traffic violations at intersections using object detection models optimized with OpenVINO. It features a PyQt5-based dashboard for visualization and control, integrates synthetic data generation using CARLA, and supports enhanced scene understanding through vision-language models.

## Problem Statement

The system monitors traffic intersections to identify and track vehicles, pedestrians, and cyclists in real-time. It collects traffic statistics and detects violations such as red-light running and jaywalking. The focus is on efficient deployment at the edge using Intel hardware.

## Objectives

- Detect vehicles, pedestrians, and cyclists using object detection
- Monitor and record traffic violations in real-time
- Display detection results and statistics through a graphical interface
- Enable model deployment using OpenVINO for optimized inference
- Generate and annotate synthetic traffic data using CARLA
- Integrate visual reasoning capabilities through vision-language models

## Training and Optimization

1. **Model Training**
   The YOLOv12 model is trained using PyTorch with labeled image data representing traffic scenes.

2. **Export Pipeline**
   The trained model is exported to ONNX format and then converted to OpenVINO's Intermediate Representation (IR) format (see the sketch after this list).

3. **Optimization**
   Post-training quantization is applied to convert the model from FP32 to INT8, improving inference speed while maintaining accuracy.

4. **Deployment**
   OpenVINO's InferRequest API is used for asynchronous inference, enabling efficient frame-by-frame processing suitable for real-time applications.
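The export and quantization steps can be reproduced with the `ultralytics` API, which is what the bundled `convert_model.py` uses. This is a minimal sketch under stated assumptions: the `int8=True` calibration path is an assumption that depends on the installed ultralytics version (older versions require NNCF directly).

```python
from ultralytics import YOLO

# Load a trained PyTorch checkpoint (yolo11x.pt ships with this commit;
# a custom-trained YOLOv12 checkpoint would be used the same way).
model = YOLO("yolo11x.pt")

# FP16 export: produces yolo11x_openvino_model/yolo11x.xml + .bin (OpenVINO IR).
model.export(format="openvino", dynamic=True, half=True, imgsz=640)

# Hypothetical INT8 post-training quantization: recent ultralytics versions accept
# int8=True plus a calibration dataset; adjust to the version actually installed.
# model.export(format="openvino", int8=True, data="coco128.yaml", imgsz=640)
```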

## Synthetic Data Generation

CARLA is used to simulate traffic intersections with accurate layouts, signage, and weather variations. It supports:

- Scene diversity through environmental changes (rain, fog, glare, nighttime)
- Simulation of pedestrian and vehicle behaviors (red-light running, jaywalking)
- Automatic annotation of bounding boxes and class labels for use with object detection models

## Vision-Language Integration

Two models are integrated to enhance scene understanding:

- **BLIP-2**: Automatically generates text summaries of visual scenes (e.g., “A vehicle is crossing the red light”)
- **LLaVA**: Enables question-answering over video frames (e.g., “Why was the pedestrian flagged?”)

These tools allow human operators to interact with the system more effectively by supporting natural language explanations and queries.
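As an illustration of how the captioning side could be wired in, here is a minimal, hedged sketch using Hugging Face `transformers`; the checkpoint name and prompt handling are assumptions, since the repository does not pin a specific BLIP-2 variant.

```python
import cv2
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

# Assumed checkpoint; any BLIP-2 variant exposed by transformers works similarly.
MODEL_ID = "Salesforce/blip2-opt-2.7b"

processor = Blip2Processor.from_pretrained(MODEL_ID)
model = Blip2ForConditionalGeneration.from_pretrained(MODEL_ID)

def caption_frame(bgr_frame):
    """Generate a short text summary for a single BGR video frame."""
    rgb = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
    inputs = processor(images=Image.fromarray(rgb), return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=30)
    return processor.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
```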

## PyQt5-Based Dashboard

The dashboard enables real-time interaction with the monitoring system and includes:

- Live video feed with overlaid bounding boxes
- Detection tags for pedestrians, vehicles, and violators
- Violation statistics and traffic flow metrics
- Controls for switching between camera sources and simulated environments
- High-performance rendering using QPainter for dynamic visual updates
Week1.md (new file, 21 lines)
@@ -0,0 +1,21 @@
# GSOC-25: Traffic Intersection Monitoring with OpenVINO

This project develops a real-time system to detect traffic objects at intersections. It uses YOLOv11 and YOLOv12 deep learning models optimized with OpenVINO to identify vehicles, pedestrians, and traffic signs efficiently on Intel hardware.

## Current Progress (Week 1)

- Built the main detection pipeline
- Tested different YOLO models for accuracy and speed
- Created vehicle classification based on size and shape
- Developed image processing and visualization tools
- Added tracking to maintain object consistency between frames
- Implemented filtering to remove false positives and overlapping detections

## Features

- Train custom YOLOv12n models using traffic data from the COCO dataset
- Convert models from PyTorch format to OpenVINO IR format
- Quantize models to INT8 for faster inference without losing accuracy
- Run detection on images, video files, and webcam streams
- Detect common traffic classes such as cars, trucks, pedestrians, and traffic lights
- Deploy models on CPU, GPU, and other OpenVINO-supported devices
all-files.txt (new empty file, 0 lines)
annotation_utils.py (new file, 94 lines)
@@ -0,0 +1,94 @@
# Utility for drawing detections, tracks, and violations on frames
import utils
from red_light_violation_pipeline import RedLightViolationPipeline
import numpy as np
from PySide6.QtGui import QPixmap
from .annotation_utils import resize_frame_for_display, convert_cv_to_pixmap

def enhanced_annotate_frame(app, frame, detections, violations):
    import cv2
    if frame is None or not isinstance(frame, np.ndarray) or frame.size == 0:
        return np.zeros((300, 300, 3), dtype=np.uint8)
    annotated_frame = frame.copy()
    if detections is None:
        detections = []
    if violations is None:
        violations = []
    if len(detections) > 0:
        if hasattr(app, 'tracker') and app.tracker:
            try:
                ds_dets = []
                for det in detections:
                    if 'bbox' not in det:
                        continue
                    try:
                        bbox = det['bbox']
                        if len(bbox) < 4:
                            continue
                        x1, y1, x2, y2 = bbox
                        w = x2 - x1
                        h = y2 - y1
                        if w <= 0 or h <= 0:
                            continue
                        conf = det.get('confidence', 0.0)
                        class_name = det.get('class_name', 'unknown')
                        ds_dets.append(([x1, y1, w, h], conf, class_name))
                    except Exception:
                        continue
                if ds_dets:
                    tracks = app.tracker.update_tracks(ds_dets, frame=frame.copy())
                    for track in tracks:
                        if not track.is_confirmed():
                            continue
                        tid = track.track_id
                        ltrb = track.to_ltrb()
                        for det in detections:
                            if 'bbox' not in det:
                                continue
                            try:
                                bbox = det['bbox']
                                if len(bbox) < 4:
                                    continue
                                dx1, dy1, dx2, dy2 = bbox
                                iou = utils.bbox_iou((dx1, dy1, dx2, dy2), tuple(map(int, ltrb)))
                                if iou > 0.5:
                                    det['track_id'] = tid
                                    break
                            except Exception:
                                continue
            except Exception:
                pass
    # IMPORTANT: All OpenCV drawing (including violation line) must be done on BGR frame before converting to RGB/QImage/QPixmap.
    # Example usage in pipeline:
    # 1. Draw violation line and all overlays on annotated_frame (BGR)
    # 2. Resize for display: display_frame = resize_frame_for_display(annotated_frame, ...)
    # 3. Convert to QPixmap: pixmap = convert_cv_to_pixmap(display_frame) or enhanced_cv_to_pixmap(display_frame)
    # Do NOT convert to RGB before drawing overlays!
    try:
        show_labels = app.config.get('display', {}).get('show_labels', True)
        show_confidence = app.config.get('display', {}).get('show_confidence', True)
        annotated_frame = utils.draw_detections(annotated_frame, detections, show_labels, show_confidence)
        annotated_frame = utils.draw_violations(annotated_frame, violations)
        return annotated_frame
    except Exception:
        return frame.copy()

# def pipeline_with_violation_line(frame: np.ndarray, draw_violation_line_func, violation_line_y: int = None) -> QPixmap:
#     """
#     Example pipeline to ensure violation line is drawn and color order is correct.
#     Args:
#         frame: Input BGR frame (np.ndarray)
#         draw_violation_line_func: Function to draw violation line (should accept BGR frame)
#         violation_line_y: Y position for the violation line (int)
#     Returns:
#         QPixmap ready for display
#     """
#     # 1. Draw violation line and overlays on BGR frame
#     annotated_frame = frame.copy()
#     if violation_line_y is not None:
#         annotated_frame = draw_violation_line_func(annotated_frame, violation_line_y, color=(0, 0, 255), label='VIOLATION LINE')
#     # 2. Resize for display
#     display_frame = resize_frame_for_display(annotated_frame, max_width=1280, max_height=720)
#     # 3. Convert to QPixmap (handles BGR->RGB)
#     pixmap = convert_cv_to_pixmap(display_frame)
#     return pixmap
config.json (new file, 24 lines)
@@ -0,0 +1,24 @@
{
    "detection": {
        "confidence_threshold": 0.5,
        "enable_ocr": true,
        "enable_tracking": true,
        "model_path": "rcb/yolo11x.pt"
    },
    "violations": {
        "red_light_grace_period": 2.0,
        "stop_sign_duration": 2.0,
        "speed_tolerance": 5
    },
    "display": {
        "max_display_width": 800,
        "show_confidence": true,
        "show_labels": true,
        "show_license_plates": true,
        "show_overlay_text": false
    },
    "performance": {
        "max_history_frames": 1000,
        "cleanup_interval": 3600
    }
}
convert_model.py (new file, 80 lines)
@@ -0,0 +1,80 @@
#!/usr/bin/env python3

import os
import sys
from pathlib import Path
import argparse

try:
    from ultralytics import YOLO
except ImportError:
    print("Installing ultralytics...")
    os.system('pip install --quiet "ultralytics>=8.0.0"')
    from ultralytics import YOLO

def convert_pt_to_openvino(model_path: str, output_dir: str = None, half: bool = False):
    """
    Convert PyTorch model to OpenVINO IR format.

    Args:
        model_path: Path to PyTorch .pt model file
        output_dir: Directory to save converted model (default is same as model with _openvino_model suffix)
        half: Whether to use half precision (FP16)

    Returns:
        Path to the converted XML file
    """
    # Validate model path
    model_path = Path(model_path)
    if not model_path.exists():
        raise FileNotFoundError(f"Model file not found: {model_path}")

    # Get model name without extension for output directory
    model_name = model_path.stem

    # Set output directory
    if output_dir:
        output_dir = Path(output_dir)
        output_dir.mkdir(exist_ok=True, parents=True)
        # We'll still use model_name for the file names
    else:
        output_dir = model_path.parent / f"{model_name}_openvino_model"

    ov_xml = output_dir / f"{model_name}.xml"

    # Check if model already exists
    if ov_xml.exists():
        print(f"OpenVINO model already exists: {ov_xml}")
        print("To reconvert, delete or rename the existing files.")
        return str(ov_xml)

    # Load model and export
    print(f"Loading model: {model_path}")
    model = YOLO(str(model_path))

    print("Exporting to OpenVINO IR format...")
    print(f"Output directory: {output_dir}")
    print(f"Using half precision: {half}")

    # Export the model (will create both .xml and .bin files)
    model.export(format="openvino", dynamic=True, half=half, imgsz=640)

    # Verify files were created
    if ov_xml.exists():
        print("✅ Conversion successful!")
        print(f"XML file: {ov_xml}")
        print(f"BIN file: {ov_xml.with_suffix('.bin')}")
        return str(ov_xml)
    else:
        print("❌ Conversion failed - output files not found")
        return None

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Convert YOLO PyTorch models to OpenVINO IR format")
    parser.add_argument("model_path", type=str, help="Path to PyTorch .pt model file")
    parser.add_argument("--output", type=str, default=None, help="Directory to save converted model")
    parser.add_argument("--half", action="store_true", help="Use half precision (FP16)")

    args = parser.parse_args()

    convert_pt_to_openvino(args.model_path, args.output, args.half)
convert_yolo11n.py (new file, 99 lines)
@@ -0,0 +1,99 @@
#!/usr/bin/env python3

import os
import sys
import time
import shutil
from pathlib import Path

# Add current directory to path
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)

# Import the conversion function from detection_openvino.py
from detection_openvino import convert_yolo_to_openvino

def main():
    """
    Convert yolo11n.pt model to OpenVINO IR format.
    Usage: python convert_yolo11n.py
    """
    print("\n" + "="*80)
    print("YOLO11n Model Converter - PyTorch to OpenVINO IR")
    print("="*80)
    # Check if the model exists
    model_path = Path("yolo11n.pt")
    if not model_path.exists():
        print(f"❌ Error: Model file {model_path} not found!")
        print(f"   Please ensure '{model_path}' is in the current directory.")
        return

    print(f"✅ Found model: {model_path}")

    # Check for OpenVINO and other dependencies
    try:
        import openvino as ov
        print(f"✅ OpenVINO version: {ov.__version__}")
    except ImportError:
        print("⚠️ OpenVINO not installed. Installing now...")
        os.system('pip install --quiet "openvino>=2024.0.0"')
        import openvino as ov
        print(f"✅ OpenVINO installed: {ov.__version__}")

    try:
        from ultralytics import YOLO
    except ImportError:
        print("⚠️ Ultralytics not installed. Installing now...")
        os.system('pip install --quiet "ultralytics>=8.0.0"')
        from ultralytics import YOLO
        print("✅ Ultralytics installed")

    # Create destination directory for the models
    openvino_dir = Path("openvino_models")
    if not openvino_dir.exists():
        openvino_dir.mkdir(exist_ok=True)
        print(f"✅ Created directory: {openvino_dir}")

    try:
        # Convert model to OpenVINO IR format
        print("\n📦 Converting model to OpenVINO IR format...")
        start_time = time.time()
        output_path = convert_yolo_to_openvino("yolo11n", half=True)
        conversion_time = time.time() - start_time

        print(f"✅ Conversion completed in {conversion_time:.2f} seconds!")
        print(f"✅ Output model: {output_path}")

        # Verify output files
        if output_path and Path(output_path).exists():
            xml_path = Path(output_path)
            bin_path = xml_path.with_suffix('.bin')
            xml_size = xml_path.stat().st_size / (1024 * 1024)  # in MB
            bin_size = bin_path.stat().st_size / (1024 * 1024)  # in MB

            print(f"✅ XML file: {xml_path} ({xml_size:.2f} MB)")
            print(f"✅ BIN file: {bin_path} ({bin_size:.2f} MB)")

            # Copy to openvino_models directory for easier access by the Qt app
            dst_xml = openvino_dir / xml_path.name
            dst_bin = openvino_dir / bin_path.name

            shutil.copy2(xml_path, dst_xml)
            shutil.copy2(bin_path, dst_bin)

            print(f"✅ Copied models to: {openvino_dir}")
            print("\n🚀 Model conversion and setup complete!")
            print("\n📋 Instructions:")
            print(f"   1. The model files are available at: {openvino_dir}")
            print("   2. In the Qt app, you can now select this model from the dropdown")
            print("   3. Use the device selection dropdown to choose between CPU and GPU")
        else:
            print("❌ Failed to verify output files.")

    except Exception as e:
        print(f"❌ Error converting model: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    main()
deploy.py (new file, 267 lines)
@@ -0,0 +1,267 @@
"""
Deployment script for packaging the Qt app as a standalone executable
"""

import os
import sys
import shutil
import platform
from pathlib import Path

# Get the current directory (where this script is)
CURRENT_DIR = Path(__file__).parent.absolute()
APP_DIR = CURRENT_DIR / "qt_app_pyside"

# Determine platform-specific details
PLATFORM = platform.system()
IS_WINDOWS = PLATFORM == "Windows"
IS_LINUX = PLATFORM == "Linux"
IS_MACOS = PLATFORM == "Darwin"

# Path separator for PyInstaller
PATH_SEP = ";" if IS_WINDOWS else ":"

def find_resource_files():
    """Find UI, QRC, and other resource files"""
    resources = []

    # Process UI files
    ui_files = list(APP_DIR.glob("**/*.ui"))
    for ui_file in ui_files:
        rel_path = ui_file.relative_to(CURRENT_DIR)
        print(f"Found UI file: {rel_path}")
        # Convert UI files to Python
        output_path = ui_file.with_suffix(".py")
        convert_ui_cmd = f"pyside6-uic {ui_file} -o {output_path}"
        print(f"Converting UI: {convert_ui_cmd}")
        os.system(convert_ui_cmd)

    # Process QRC files (resource files)
    qrc_files = list(APP_DIR.glob("**/*.qrc"))
    for qrc_file in qrc_files:
        rel_path = qrc_file.relative_to(CURRENT_DIR)
        print(f"Found QRC file: {rel_path}")
        # Convert QRC files to Python (resources.qrc -> resources_rc.py)
        output_path = qrc_file.with_name(qrc_file.stem + "_rc.py")
        convert_qrc_cmd = f"pyside6-rcc {qrc_file} -o {output_path}"
        print(f"Converting QRC: {convert_qrc_cmd}")
        os.system(convert_qrc_cmd)

    # Find asset directories
    asset_dirs = [
        "assets",
        "resources",
        "images",
        "icons",
        "themes",
        "models"
    ]

    data_files = []
    for asset_dir in asset_dirs:
        full_path = APP_DIR / asset_dir
        if full_path.exists() and full_path.is_dir():
            rel_path = full_path.relative_to(CURRENT_DIR)
            data_files.append(f"{rel_path}{PATH_SEP}{rel_path}")
            print(f"Found asset directory: {rel_path}")

    # Include specific model directories from root if they exist
    root_model_dirs = [
        "models/yolo11x_openvino_model",
        "openvino_models",
        "yolo11x_openvino_model"
    ]

    for model_dir in root_model_dirs:
        model_path = Path(CURRENT_DIR) / model_dir
        if model_path.exists() and model_path.is_dir():
            data_files.append(f"{model_dir}{PATH_SEP}{model_dir}")
            print(f"Found model directory: {model_dir}")

    # Find specific asset files
    asset_extensions = [".png", ".ico", ".jpg", ".svg", ".json", ".xml", ".bin", ".qss"]
    for ext in asset_extensions:
        for asset_file in APP_DIR.glob(f"**/*{ext}"):
            # Skip files in asset directories we've already included
            if any(dir_name in str(asset_file) for dir_name in asset_dirs):
                continue

            # Include individual file
            rel_path = asset_file.relative_to(CURRENT_DIR)
            dir_path = rel_path.parent
            data_files.append(f"{rel_path}{PATH_SEP}{dir_path}")
            print(f"Found asset file: {rel_path}")

    return data_files

def create_spec_file(data_files, main_script="main.py"):
    """Create a PyInstaller spec file"""
    spec_path = CURRENT_DIR / "qt_app.spec"
    # Format data_files for the spec file
    formatted_data_files = []
    for data_file in data_files:
        src, dst = data_file.split(PATH_SEP)
        # Ensure correct escaping for Windows paths
        if IS_WINDOWS:
            src = src.replace('\\', '\\\\')
            dst = dst.replace('\\', '\\\\')
        formatted_data_files.append(f"(r'{src}', r'{dst}')")

    data_files_str = ", ".join(formatted_data_files)
    # Main script location
    main_script_path = APP_DIR / main_script
    if not main_script_path.exists():
        print(f"ERROR: Main script not found at {main_script_path}")
        sys.exit(1)

    # Convert path to string with proper escaping
    main_script_path_str = str(main_script_path)
    # Icon file
    icon_file = str(APP_DIR / "resources" / "icon.ico") if IS_WINDOWS else str(APP_DIR / "resources" / "icon.icns")
    if not Path(icon_file).exists():
        icon_file = None
        print("No icon file found. Continuing without an icon.")

    spec_content = f"""# -*- mode: python ; coding: utf-8 -*-

block_cipher = None

a = Analysis(
    [r'{main_script_path_str}'],
    pathex=['{CURRENT_DIR}'],
    binaries=[],
    datas=[{data_files_str}],
    hiddenimports=['PySide6.QtCore', 'PySide6.QtGui', 'PySide6.QtWidgets'],
    hookspath=[],
    hooksconfig={{}},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)

pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.zipfiles,
    a.datas,
    [],
    name='traffic_monitoring_app',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=False,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
"""

    # Add icon if it exists
    if icon_file:
        spec_content += f"    icon=r'{icon_file}',\n"

    spec_content += ")\n\n"

    # For macOS, create app bundle
    if IS_MACOS:
        spec_content += f"""app = BUNDLE(exe,
    name="TrafficMonitoring.app",
    icon={icon_file},
)
"""

    with open(spec_path, "w") as f:
        f.write(spec_content)

    print(f"Created PyInstaller spec file: {spec_path}")
    return spec_path

def create_splash_screen_script():
    """Create a splash screen script"""
    splash_script = APP_DIR / "splash.py"

    content = """from PySide6.QtWidgets import QApplication, QSplashScreen
from PySide6.QtCore import Qt, QTimer
from PySide6.QtGui import QPixmap
import sys
import os

def show_splash():
    app = QApplication(sys.argv)

    # Get the directory of the executable or script
    if getattr(sys, 'frozen', False):
        # Running as compiled executable
        app_dir = os.path.dirname(sys.executable)
    else:
        # Running as script
        app_dir = os.path.dirname(os.path.abspath(__file__))

    # Look for splash image
    splash_image = os.path.join(app_dir, 'resources', 'splash.png')
    if not os.path.exists(splash_image):
        splash_image = os.path.join(app_dir, 'splash.png')
        if not os.path.exists(splash_image):
            return None

    # Create splash screen
    pixmap = QPixmap(splash_image)
    splash = QSplashScreen(pixmap, Qt.WindowStaysOnTopHint)
    splash.show()
    app.processEvents()

    return splash, app

if __name__ == "__main__":
    # This is for testing the splash screen independently
    splash, app = show_splash()

    # Close the splash after 3 seconds
    QTimer.singleShot(3000, splash.close)

    sys.exit(app.exec())
"""

    with open(splash_script, "w") as f:
        f.write(content)

    print(f"Created splash screen script: {splash_script}")
    return splash_script

def run_pyinstaller(spec_file):
    """Run PyInstaller with the spec file"""
    cmd = f"pyinstaller --clean {spec_file}"
    print(f"Running PyInstaller: {cmd}")
    os.system(cmd)

def main():
    # Create splash screen script
    create_splash_screen_script()

    # Find resource files
    data_files = find_resource_files()

    # Create spec file
    spec_file = create_spec_file(data_files)

    # Install PyInstaller if not already installed
    os.system("pip install pyinstaller")

    # Run PyInstaller
    run_pyinstaller(spec_file)

    # Output success message
    print("\n" + "="*50)
    print("Build complete! Your executable is in the dist/ folder.")
    print("="*50)

if __name__ == "__main__":
    main()
detection_openvino.py (new file, 1176 lines; file diff suppressed because it is too large)
detection_openvino_async.py (new file, 1694 lines; file diff suppressed because it is too large)
docker-compose.yml (new file, 32 lines)
@@ -0,0 +1,32 @@
version: "3.8"
services:
  detector:
    build:
      context: .
      dockerfile: Dockerfile
    image: traffic-detector:latest
    environment:
      - MODEL_PATH=/app/yolo11x.xml
    volumes:
      - ./models:/app/models
    command: ["python", "detection_openvino.py"]
    deploy:
      resources:
        limits:
          memory: 2g
  app:
    build:
      context: .
      dockerfile: Dockerfile
    image: traffic-app:latest
    depends_on:
      - detector
    environment:
      - DETECTOR_API=http://detector:8000
    command: ["python", "qt_app_pyside/main.py"]
    ports:
      - "8501:8501"
    deploy:
      resources:
        limits:
          memory: 2g
fallback_annotation_utils.py (new file, 236 lines)
@@ -0,0 +1,236 @@
"""
Fallback annotation utilities for enhanced video controller.
This module provides basic implementation of the annotation functions
required by the enhanced video controller, in case the regular module fails to import.
"""

import sys
import cv2
import numpy as np
import os
from pathlib import Path
from typing import Dict, List, Tuple, Any, Optional
try:
    from PySide6.QtGui import QImage, QPixmap
    from PySide6.QtCore import Qt
    QT_AVAILABLE = True
except ImportError:
    print("⚠️ PySide6 not available, some functions will be limited")
    QT_AVAILABLE = False

# Color mapping for traffic-related classes
COLORS = {
    'person': (255, 165, 0),        # Orange
    'bicycle': (255, 0, 255),       # Magenta
    'car': (0, 255, 0),             # Green
    'motorcycle': (255, 255, 0),    # Cyan
    'bus': (0, 0, 255),             # Red
    'truck': (0, 128, 255),         # Orange-Blue
    'traffic light': (0, 165, 255), # Orange
    'stop sign': (0, 0, 139),       # Dark Red
    'parking meter': (128, 0, 128), # Purple
    'default': (0, 255, 255)        # Yellow as default
}

def enhanced_draw_detections(frame: np.ndarray, detections: List[Dict],
                             show_confidence: bool = True,
                             show_labels: bool = True) -> np.ndarray:
    """
    Draw detections on frame with enhanced visuals.

    Args:
        frame: Input video frame
        detections: List of detection dictionaries
        show_confidence: Whether to show confidence values
        show_labels: Whether to show class labels

    Returns:
        Frame with detections drawn
    """
    if not detections:
        return frame

    # Create a copy of the frame
    result = frame.copy()

    # Process each detection
    for det in detections:
        if 'bbox' not in det:
            continue

        # Get bounding box
        x1, y1, x2, y2 = map(int, det['bbox'])

        # Get class name and confidence
        class_name = det.get('class_name', 'unknown')
        conf = det.get('confidence', 0)

        # Get color for this class
        color = COLORS.get(class_name.lower(), COLORS['default'])

        # Draw bounding box
        cv2.rectangle(result, (x1, y1), (x2, y2), color, 2)

        # Prepare label text
        label = ""
        if show_labels:
            label = class_name
            if show_confidence:
                label = f"{class_name} ({conf:.2f})"
            elif 'track_id' in det:
                label = f"{class_name} #{det['track_id']}"
        elif show_confidence:
            label = f"{conf:.2f}"
        elif 'track_id' in det:
            label = f"#{det['track_id']}"

        # Draw label if we have one
        if label:
            # Calculate label size and position
            (label_width, label_height), baseline = cv2.getTextSize(
                label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)

            # Draw label background
            cv2.rectangle(
                result,
                (x1, y1),
                (x1 + label_width, y1 - label_height - baseline - 5),
                color,
                -1
            )

            # Draw label text
            cv2.putText(
                result,
                label,
                (x1, y1 - baseline - 5),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (255, 255, 255),
                1
            )

    return result

def draw_performance_overlay(frame: np.ndarray, metrics: Dict[str, Any]) -> np.ndarray:
    """
    Draw performance metrics overlay on frame.

    Args:
        frame: Input video frame
        metrics: Dictionary of performance metrics

    Returns:
        Frame with performance overlay
    """
    if not metrics:
        return frame

    # Create a copy of the frame
    result = frame.copy()

    # Get frame dimensions
    height, width = frame.shape[:2]

    # Extract metrics
    fps = metrics.get('fps', 0)
    inference_fps = metrics.get('inference_fps', 0)
    inference_time = metrics.get('inference_time', 0)

    # Format text
    text_lines = [
        f"FPS: {fps:.1f}",
        f"Inference: {inference_time:.1f}ms ({inference_fps:.1f} FPS)",
    ]

    # Draw semi-transparent background
    overlay = result.copy()
    bg_height = 30 + (len(text_lines) - 1) * 20
    cv2.rectangle(overlay, (10, 10), (250, 10 + bg_height), (0, 0, 0), -1)
    cv2.addWeighted(overlay, 0.7, result, 0.3, 0, result)

    # Draw text lines
    y = 30
    for text in text_lines:
        cv2.putText(
            result,
            text,
            (20, y),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.6,
            (255, 255, 255),
            1,
            cv2.LINE_AA
        )
        y += 20

    return result

# Qt-specific helper functions
def enhanced_cv_to_qimage(cv_img: np.ndarray) -> Optional['QImage']:
    """
    Convert OpenCV image to QImage with enhanced handling.

    Args:
        cv_img: OpenCV image (numpy array)

    Returns:
        QImage or None if conversion failed
    """
    if not QT_AVAILABLE:
        print("⚠️ Cannot convert to QImage: PySide6 not available")
        return None

    if cv_img is None or cv_img.size == 0:
        print("⚠️ Cannot convert empty image to QImage")
        return None

    try:
        height, width, channels = cv_img.shape

        # Ensure we're dealing with RGB or RGBA
        if channels == 3:
            # OpenCV uses BGR, we need RGB for QImage
            cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
            format = QImage.Format_RGB888
        elif channels == 4:
            # OpenCV uses BGRA, we need RGBA for QImage
            cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGRA2RGBA)
            format = QImage.Format_RGBA8888
        else:
            print(f"⚠️ Unsupported image format with {channels} channels")
            return None

        # Create QImage from numpy array
        steps = width * channels
        return QImage(cv_img.data, width, height, steps, format)

    except Exception as e:
        print(f"❌ Error converting image to QImage: {e}")
        return None

def enhanced_cv_to_pixmap(cv_img: np.ndarray) -> Optional['QPixmap']:
    """
    Convert OpenCV image to QPixmap with enhanced handling.

    Args:
        cv_img: OpenCV image (numpy array)

    Returns:
        QPixmap or None if conversion failed
    """
    if not QT_AVAILABLE:
        print("⚠️ Cannot convert to QPixmap: PySide6 not available")
        return None

    # Convert to QImage first
    qimg = enhanced_cv_to_qimage(cv_img)
    if qimg is None:
        return None

    # Convert QImage to QPixmap
    try:
        return QPixmap.fromImage(qimg)
    except Exception as e:
        print(f"❌ Error converting QImage to QPixmap: {e}")
        return None
finale.md (new file, 715 lines)
@@ -0,0 +1,715 @@
# Traffic Monitoring System: End-to-End Pipeline Documentation (Deep Dive)

---

## Table of Contents

1. Introduction
2. E2E Pipeline Overview
3. VIDEO INPUT
4. FRAME PREPROCESSING
5. YOLO DETECTION
6. BYTETRACK TRACKING
7. TRAFFIC LIGHT DETECTION
8. CROSSWALK DETECTION
9. VIOLATION ANALYSIS
10. UI VISUALIZATION
11. LOGGING & STORAGE
12. DEVICE & MODEL SWITCHING
13. ANALYTICS & PERFORMANCE MONITORING
14. SYSTEM ANALYSIS & REPORTING
15. CONFIGURATION & EXTENSIBILITY
16. ERROR HANDLING & FALLBACKS
17. PACKAGING & DEPLOYMENT
18. Developer Notes & Best Practices
19. Example Data Flows
20. Glossary
21. Application Implementation Architecture & Deployment
22. Migration to Containers & Microservices: Practical Guide

---

## 1. Introduction

This document is a comprehensive, code-mapped, and developer-friendly guide to the traffic video analytics system implemented in the `khatam` project. It covers every stage of the E2E pipeline, from video input to logging and storage, and explains the logic, function definitions, and data flow in detail. The goal is to make the system architecture, data flow, and component responsibilities clear and accessible for developers, maintainers, and reviewers.

---

## 2. E2E Pipeline Overview

```
📹 VIDEO INPUT
    ↓ (CPU)
🔍 FRAME PREPROCESSING
    ↓ (CPU → GPU/NPU)
🤖 YOLO DETECTION
    ↓ (CPU)
🎯 BYTETRACK TRACKING
    ↓ (CPU)
🚦 TRAFFIC LIGHT DETECTION
    ↓ (CPU)
🚶 CROSSWALK DETECTION
    ↓ (CPU)
⚖️ VIOLATION ANALYSIS
    ↓ (CPU)
🖼️ UI VISUALIZATION
    ↓ (CPU)
💾 LOGGING & STORAGE
```

---

## 3. VIDEO INPUT (Deep Dive)

### Main Classes and Responsibilities

- **MainWindow / EnhancedMainWindow**: Entry point for the UI, connects user actions (open file, start/stop, select camera) to the video controller.
- **VideoController**: Handles all video source logic. Maintains state (current source, frame index, FPS, etc.), manages OpenCV capture object, and emits frames via Qt signals.
- **Signal Flow**: User action → MainWindow slot → VideoController method → emits `frame_ready` signal → downstream slots (preprocessing, analytics, UI).

### Key Methods

- `__init__`: Initializes capture state, sets up signals/slots.
- `start_capture(source)`: Opens the video source, sets up a timer or thread for frame reading.
- `read_frame()`: Reads a frame, handles errors (end of stream, device disconnect), emits frame.
- `stop_capture()`: Releases resources, stops timers/threads.

### Error Handling

- If the video source fails (file not found, camera error), emits an error signal to the UI.
- If end-of-stream is reached, can loop, stop, or prompt the user.

### Example Signal Connection

```python
self.video_controller.frame_ready.connect(self.on_frame_ready)
```

### Example: Handling Multiple Sources

```python
def start_capture(self, source):
    if isinstance(source, int):  # Webcam
        self.cap = cv2.VideoCapture(source)
    elif isinstance(source, str):  # File or RTSP
        self.cap = cv2.VideoCapture(source)
    # ... handle errors, set FPS, etc.
```

---

## 4. FRAME PREPROCESSING (Deep Dive)

### Preprocessing Pipeline

- **Resize**: Ensures frame matches model input size (e.g., 640x640 for YOLOv11n).
- **Color Conversion**: Converts BGR (OpenCV default) to RGB or other formats as required.
- **Normalization**: Scales pixel values to [0, 1] or [-1, 1] as needed by the model.
- **Padding/Cropping**: Maintains aspect ratio or fits model input shape.
- **Device Transfer**: If using GPU/NPU, may convert frame to appropriate memory space (e.g., OpenVINO blob, CUDA tensor).

### Example: Preprocessing Function

```python
def preprocess(frame, input_shape):
    # Resize
    frame = cv2.resize(frame, input_shape)
    # Convert color
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Normalize
    frame = frame.astype(np.float32) / 255.0
    # (Optional) Pad/crop
    # (Optional) Convert to OpenVINO blob
    return frame
```

### Integration with Device Selection

- If the model is running on GPU/NPU, preprocessing may include conversion to device-specific format.
- Device selection logic (in ModelManager) determines if preprocessing should prepare data for CPU, GPU, or NPU.

### Error Handling

- If frame is None or invalid, preprocessing returns a default or skips the frame.
- Handles exceptions in color conversion or resizing gracefully.

---

## 5. YOLO DETECTION (Deep Dive)

### Model Loading and Compilation

- **ModelManager**: Responsible for loading YOLOv11 models, compiling with OpenVINO, and managing device selection.
- **OpenVINO Core**: Used to read and compile models for CPU, GPU, or NPU.
- **Model Switching**: If performance drops, ModelManager can switch to a lighter model or different device.

### Inference Logic

- Receives preprocessed frame.
- Runs inference using OpenVINO's `compiled_model([input_tensor])`.
- Parses output to extract bounding boxes, class labels, and confidence scores.

### Example: Detection Function

```python
def detect_vehicles(self, frame):
    input_tensor = self.preprocess(frame)
    output = self.compiled_model([input_tensor])[self.output_layer]
    detections = self.postprocess(output, frame.shape)
    return detections
```

### Device/Model Switching

- If FPS < threshold or latency > threshold, triggers `switch_device()` or `switch_model()`.
- Switch events are logged and visualized in the UI.

### Error Handling

- If inference fails, logs the error and may fall back to CPU or a lighter model.
- Handles device unavailability and model loading errors.

---

## 6. BYTETRACK TRACKING

### Code Location

- `qt_app_pyside/controllers/video_controller_new.py`
- `qt_app_pyside/bytetrack/`

### Description

Detected objects are passed to the ByteTrack tracker for multi-object tracking. ByteTrack assigns unique IDs to objects and maintains their trajectories across frames. Tracking is performed on the CPU for efficiency. The tracker handles object association, lost/found logic, and ID management.

### Key Functions

- **`ByteTrackTracker.update(detections)`**: Updates the tracker with new detections.
- **`VideoController._track_objects()`**: Manages the tracking process.

### Data Flow

1. Detected objects received from the YOLO detection stage.
2. Objects are passed to the ByteTrack tracker.
3. Tracker updates object states and IDs.

### Example

```python
def update(self, detections):
    for detection in detections:
        if detection.confidence > self.confidence_threshold:
            self.tracked_objects.append(detection)
```

---

## 7. TRAFFIC LIGHT DETECTION

### Code Location

- `qt_app_pyside/utils/traffic_light_utils.py`
- `qt_app_pyside/red_light_violation_pipeline.py`

### Description

Specialized logic detects the state and position of traffic lights in the frame. It may use color thresholding, region-of-interest analysis, or a dedicated model. Results are used for violation analysis (e.g., red light running).

### Key Functions

- **`detect_traffic_lights(frame)`**: Detects traffic lights in the frame.
- **`RedLightViolationPipeline.process_traffic_lights()`**: Processes and analyzes traffic light data.

### Data Flow

1. Frame with detected objects received from the tracking stage.
2. Traffic light detection applied to the frame.
3. Results used for violation analysis.

### Example

```python
def detect_traffic_lights(frame):
    # Convert to HSV and threshold for red color
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, LOWER_RED, UPPER_RED)
    return mask
```

---

## 8. CROSSWALK DETECTION

### Code Location

- `qt_app_pyside/utils/crosswalk_utils_advanced.py`
- `qt_app_pyside/utils/crosswalk_utils2.py`

### Description

Detects crosswalks using image processing or deep learning. Used to determine pedestrian zones and for violation logic.

### Key Functions

- **`detect_crosswalks(frame)`**: Detects crosswalks in the frame.

### Data Flow

1. Frame with detected objects received from the tracking stage.
2. Crosswalk detection applied to the frame.
3. Results used for violation analysis and UI visualization.

### Example

```python
def detect_crosswalks(frame):
    # Use Hough Transform to detect lines that form crosswalks
    lines = cv2.HoughLinesP(frame, 1, np.pi / 180, threshold=100)
    return lines
```

---

## 9. VIOLATION ANALYSIS

### Code Location

- `qt_app_pyside/red_light_violation_pipeline.py`
- `qt_app_pyside/violation_openvino.py`

### Description

Combines tracking, traffic light, and crosswalk data to detect violations (e.g., red light running, crosswalk violations). Applies rule-based or ML-based logic to determine if a violation occurred. Results are logged and visualized.

### Key Functions

- **`RedLightViolationPipeline.analyze_violations()`**: Analyzes potential violations.
- **`ViolationAnalyzer.process()`**: Processes violations for logging and visualization.

### Data Flow

1. Tracked objects and traffic light states received.
2. Violation analysis applied based on rules or ML models.
3. Violations are logged and may trigger alerts or actions.

### Example

```python
def analyze_violations(self):
    for track in self.tracks:
        if track.violation_flag:
            self.violations.append(track)
```

---

## 10. UI VISUALIZATION

### Code Location

- `qt_app_pyside/main.py`
- `qt_app_pyside/enhanced_main_window.py`
- `qt_app_pyside/ui/analytics_tab.py`
- `qt_app_pyside/ui/performance_graphs.py`

### Description

The PySide6 UI displays the video, overlays detections, tracks, and violation markers. Real-time analytics (FPS, latency, counts) are shown in dedicated tabs. Performance graphs update live using signals from the analytics controller. Device/model switches and latency spikes are visualized.

### Key Functions

- **`MainWindow.display_frame()`**: Displays the current frame in the UI.
- **`AnalyticsTab.update_charts()`**: Updates analytics charts with new data.
- **`PerformanceGraphsWidget.update_metrics()`**: Updates performance metrics in the UI.

### Data Flow

1. Processed frame with overlays ready from the violation analysis stage.
2. Frame displayed in the UI with real-time updates for analytics and performance.

### Example

```python
def display_frame(self, frame):
    # Convert the frame to QImage and display in the label
    height, width, channel = frame.shape
    bytes_per_line = 3 * width
    qimg = QImage(frame.data, width, height, bytes_per_line, QImage.Format_RGB888)
    self.video_label.setPixmap(QPixmap.fromImage(qimg))
```

---

## 11. LOGGING & STORAGE

### Code Location

- `qt_app_pyside/annotation_utils.py`
- `qt_app_pyside/logging_utils.py`
- `qt_app_pyside/analytics_controller.py`

### Description

All detections, tracks, violations, and analytics are logged to disk (JSON, CSV, or database). System analysis and performance reports are saved for later review. Logging is handled asynchronously to avoid blocking the main pipeline.

### Key Functions

- **`AnalyticsController.save_report()`**: Saves the analytics report to disk.
- **`LoggingUtils.log_event()`**: Logs events and metrics to the configured sink.

### Data Flow

1. Detection, tracking, and violation data generated.
2. Data logged asynchronously to the configured storage (file, database).
3. Reports and analytics data saved for review and debugging.

### Example

```python
def log_event(self, event_data):
    # Append the event data to the log file
    with open(self.log_file, 'a') as f:
        json.dump(event_data, f)
        f.write('\n')
```

---

## 12. DEVICE & MODEL SWITCHING

### Code Location

- `qt_app_pyside/controllers/model_manager.py`
- `qt_app_pyside/controllers/analytics_controller.py`

### Description

The system monitors FPS, latency, and resource usage. If performance drops (e.g., FPS < threshold, high latency), the model or device is switched automatically. Device switch events are logged and visualized in the UI.

### Key Functions

- **`ModelManager.switch_device()`**: Switches the device for model inference.
- **`AnalyticsController.update_device()`**: Updates the device configuration based on performance.

### Data Flow

1. Performance metrics monitored in real time.
2. If metrics exceed thresholds, device or model is switched.
3. New device/model is used for subsequent inference and processing.

### Example

```python
def switch_device(self, new_device):
    self.current_device = new_device
    # Reinitialize the model with the new device
    self.model = Core().compile_model(self.model, new_device)
```

---

## 13. ANALYTICS & PERFORMANCE MONITORING

### Code Location

- `qt_app_pyside/controllers/analytics_controller.py`
- `qt_app_pyside/ui/performance_graphs.py`
- `qt_app_pyside/system_metrics_monitor.py`

### Description

The analytics controller collects per-frame and aggregated metrics (FPS, latency, counts, spikes). Live system metrics (CPU/RAM) are collected using `psutil` and included in analytics data. All metrics are emitted via Qt signals to update the UI in real time.

### Key Functions

- **`AnalyticsController.process_frame_data()`**: Processes and emits frame-level analytics data.
- **`AnalyticsController.get_latency_statistics()`**: Returns latency statistics for analysis.
- **`SystemMetricsMonitor.get_cpu_ram_metrics()`**: Collects CPU and RAM usage metrics.

### Data Flow

1. Frame processing completes, and analytics data is ready.
2. Data is emitted via signals to update UI components (charts, labels).
3. System metrics are collected and displayed in real time.

### Example

```python
def process_frame_data(self, frame_data):
    # Calculate FPS and latency
    self.fps = 1.0 / (time.time() - self.last_frame_time)
    self.last_frame_time = time.time()
    # Emit the new metrics
    self.fps_changed.emit(self.fps)
```
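The `SystemMetricsMonitor.get_cpu_ram_metrics()` helper named above is not shown in this commit; the following is a minimal, hedged sketch of how such a `psutil`-based collector could look (the method and field names are assumptions):

```python
import psutil

class SystemMetricsMonitor:
    """Hypothetical sketch of the CPU/RAM collector referenced above."""

    def get_cpu_ram_metrics(self):
        # cpu_percent() reports utilisation since the previous call;
        # interval=None keeps it non-blocking for a per-frame pipeline.
        vm = psutil.virtual_memory()
        return {
            "cpu_percent": psutil.cpu_percent(interval=None),
            "ram_percent": vm.percent,
            "ram_used_mb": vm.used / (1024 * 1024),
        }
```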

---

## 14. SYSTEM ANALYSIS & REPORTING

### Code Location

- `qt_app_pyside/system_analysis.py`

### Description

Provides comprehensive system and pipeline analysis, including platform specs, pipeline architecture, tracking performance, latency spikes, model switching, and optimization recommendations. Generates and saves detailed reports for debugging and optimization.

### Key Functions

- **`TrafficMonitoringAnalyzer.generate_comprehensive_report()`**: Generates a detailed report of the system's performance and configuration.

### Data Flow

1. System and pipeline data is collected.
2. Analysis is performed to identify issues and optimizations.
3. Reports are generated and saved for review.

### Example

```python
def generate_comprehensive_report(self):
    # Collect data from all relevant sources
    data = self.collect_data()
    # Analyze the data and generate a report
    report = self.analyze_data(data)
    # Save the report to a file
    with open(self.report_file, 'w') as f:
        f.write(report)
```

---

## 15. CONFIGURATION & EXTENSIBILITY

### Code Location

- `qt_app_pyside/config.json`
- `qt_app_pyside/requirements.txt`
- `qt_app_pyside/build_exe.py`

### Description

All model, device, and pipeline parameters are configurable via JSON and command-line arguments. The system is designed for easy extension (new models, trackers, analytics).
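As an illustration (not code taken from the repository), a component might read its settings from `config.json` like this; the keys shown are the ones defined in the `config.json` added by this commit:

```python
import json
from pathlib import Path

def load_config(path: str = "config.json") -> dict:
    """Load pipeline settings from the JSON config shipped with the app."""
    with open(Path(path), "r", encoding="utf-8") as f:
        return json.load(f)

config = load_config()
conf_threshold = config["detection"]["confidence_threshold"]  # 0.5 by default
show_labels = config["display"].get("show_labels", True)
```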
|
||||
---
|
||||
|
||||
## 16. ERROR HANDLING & FALLBACKS
|
||||
|
||||
### Code Location
|
||||
|
||||
- All major modules
|
||||
|
||||
### Description
|
||||
|
||||
Robust error handling ensures the pipeline continues running even if a component fails. Fallbacks are in place for device switching, model loading, and analytics.
|
||||
|
||||
---
|
||||
|
||||
## 17. PACKAGING & DEPLOYMENT
|
||||
|
||||
### Code Location
|
||||
|
||||
- `qt_app_pyside/qt_app.spec`
|
||||
- `qt_app_pyside/build_exe.py`
|
||||
- `qt_app_pyside/requirements.txt`
|
||||
|
||||
### Description
|
||||
|
||||
The application is packaged as a single executable using PyInstaller. All dependencies, models, and resources are bundled for easy deployment.
|
||||
|
||||
---
|
||||
|
||||
## 18. Developer Notes & Best Practices
|
||||
|
||||
- Use virtual environments to manage dependencies (`venv`, `conda`).
|
||||
- Regularly update models and dependencies for best performance and features.
|
||||
- Monitor system performance and adjust device/model configurations as needed.
|
||||
- Refer to the code comments and function docstrings for detailed logic and usage.
|
||||
|
||||
---
|
||||
|
||||
## 19. Example Data Flows
|
||||
|
||||
### 19.1. From Video File
|
||||
|
||||
1. User selects a video file in the UI.
|
||||
2. `VideoController` opens the file and starts reading frames.
|
||||
3. Frames are preprocessed and passed to the YOLO detection model.
|
||||
4. Detected objects are tracked, and violations are analyzed.
|
||||
5. Results are logged, and analytics are updated in the UI.
|
||||
|
||||
### 19.2. From Webcam
|
||||
|
||||
1. User selects the webcam as the video source.
|
||||
2. `VideoController` initializes the webcam stream.
|
||||
3. Frames are captured and processed in real time.
|
||||
4. Detected objects and violations are displayed in the UI.
|
||||
5. Performance metrics are logged and visualized.
|
||||
|
||||
---

## 20. Glossary

- **E2E**: End-to-End, referring to the complete pipeline from video input to logging and storage.
- **YOLO**: You Only Look Once, a real-time object detection system.
- **ByteTrack**: A multi-object tracking algorithm.
- **OpenVINO**: Open Visual Inference and Neural Network Optimization, a toolkit for optimizing and deploying AI inference.
- **Qt**: A free and open-source widget toolkit for creating graphical user interfaces as well as non-GUI programs.

---

## 21. Application Implementation Architecture & Deployment

### Monolithic Desktop Application

- The traffic monitoring system is implemented as a **monolithic desktop application** using Python and PySide6 (Qt for Python).
- All major components (video input, detection, tracking, analytics, UI, logging) are integrated into a single process and codebase.

### Containers

- **No containers are used** in the standard deployment. The application is designed to run directly on Windows (and optionally Linux) as a standalone executable.
- All dependencies (Python runtime, libraries, models) are bundled using PyInstaller, so users do not need Docker or other container runtimes.

### Microservices

- **No microservices are used**. The architecture is not distributed; all logic runs in a single process.
- Communication between components is handled via Python function calls and Qt signals/slots, not via network APIs or service calls (see the sketch below).

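As a generic illustration of this in-process signal/slot pattern (hypothetical class names, not the project's actual controllers):

```python
from PySide6.QtCore import QObject, Signal, Slot

class DetectionEmitter(QObject):
    """Hypothetical emitter: publishes per-frame detection counts."""
    detections_ready = Signal(int)

class StatusConsumer(QObject):
    """Hypothetical consumer: would update a UI label in the real app."""
    @Slot(int)
    def on_detections(self, count):
        print(f"Detections in frame: {count}")

emitter = DetectionEmitter()
consumer = StatusConsumer()
emitter.detections_ready.connect(consumer.on_detections)
emitter.detections_ready.emit(7)  # delivered as a direct call within the single process
```
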

### Rationale

- This design is chosen for ease of deployment, real-time performance, and simplicity for end users (e.g., traffic authorities, researchers).
- The system can be extended to use microservices or containers for cloud-based or distributed deployments, but the current implementation is optimized for local, real-time desktop use.

### Extensibility

- The codebase is modular, so individual components (e.g., detection, analytics, UI) can be refactored into microservices if needed in the future.
- For large-scale deployments (e.g., city-wide monitoring), a distributed architecture with containers and microservices could be considered, but is not present in the current version.

### Summary Table

| Aspect         | Implementation                |
| -------------- | ----------------------------- |
| Containerized? | No                            |
| Microservices? | No (Monolithic)               |
| Platform       | Windows Desktop (PyInstaller) |
| UI Framework   | PySide6 (Qt for Python)       |
| Deployment     | Single executable             |

---

# Conclusion

This documentation provides a detailed, code-mapped explanation of the traffic monitoring system's E2E pipeline. Each stage is modular, extensible, and robust, with clear separation of concerns and real-time analytics for performance monitoring and optimization. For further details, refer to the code comments and function docstrings in each module.

---

## 22. How to Move from Conda to Containers & Microservices: Step-by-Step Guide

### 1️⃣ Identify and Modularize Services

- **Detection Service**: Handles frame input, runs YOLOv11, returns detections (bounding boxes, classes, scores).
- **Tracking Service**: Accepts detections, runs ByteTrack/DeepSORT, returns tracked IDs and trajectories.
- **Analytics Service**: Processes tracking data, computes counts, violations, and aggregates.
- **UI Service**: (Optional) PySide6 desktop UI or a web UI (Flask/FastAPI + React/Vue).

**Action:**

- Refactor your codebase so each of these is a separate Python module or folder with a clear entry point (e.g., `detector.py`, `tracker.py`, `analytics.py`), as in the sketch below.

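For example, a minimal `detector.py` could expose a single function with a stable return type; the `Detection` dataclass and its fields are hypothetical:

```python
# detector.py -- hypothetical module boundary for the detection service
from dataclasses import dataclass
from typing import List

@dataclass
class Detection:
    box: tuple    # (x1, y1, x2, y2) in pixels
    label: str    # e.g. "car", "person"
    score: float  # confidence in [0, 1]

def detect(frame) -> List[Detection]:
    """Run the YOLO model on one frame and return detections.

    The actual inference call (OpenVINO compiled model, preprocessing, NMS)
    would live here; this stub only fixes the interface.
    """
    raise NotImplementedError
```
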
### 2️⃣ Replace Conda with Docker for Environment Management

- Write a `requirements.txt` using `pip freeze > requirements.txt` inside your Conda environment.
- Remove any Conda-specific packages from `requirements.txt` (e.g., `conda`, `conda-package-handling`).
- Create a `Dockerfile`:

```dockerfile
FROM python:3.10-slim
RUN apt-get update && apt-get install -y \
    ffmpeg \
    libgl1 \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
# Replace main.py with your entry point
CMD ["python", "main.py"]
```

- Build and run:

```bash
docker build -t traffic-monitor .
docker run --rm -it traffic-monitor
```

### 3️⃣ Add REST APIs for Microservices

- Use FastAPI or Flask in each service to expose endpoints:
  - `/detect` for detection
  - `/track` for tracking
  - `/analyze` for analytics
- Example (FastAPI):

```python
from fastapi import FastAPI, File, UploadFile

app = FastAPI()

@app.post("/detect")
def detect(file: UploadFile = File(...)):
    # Run detection logic
    return {"detections": ...}
```

- The UI/controller sends HTTP requests to these endpoints using `requests` or `httpx`.

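On the calling side, a controller might post an image to the detection endpoint roughly as follows; the URL and field name are assumptions that match the FastAPI example above.

```python
import requests

def request_detections(image_path, url="http://localhost:8001/detect"):
    """Send an image to the detection service and return its JSON response (illustrative)."""
    with open(image_path, "rb") as f:
        response = requests.post(url, files={"file": f}, timeout=10)
    response.raise_for_status()
    return response.json()  # e.g. {"detections": [...]}
```
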

### 4️⃣ Orchestrate with Docker Compose

- Create a `docker-compose.yml` to run all services together:

```yaml
version: "3"
services:
  detector:
    build: ./detector
    ports: ["8001:8000"]
  tracker:
    build: ./tracker
    ports: ["8002:8000"]
  analytics:
    build: ./analytics
    ports: ["8003:8000"]
  ui:
    build: ./ui
    ports: ["8501:8501"]
```

- Now you can start all services with `docker-compose up`.

### 5️⃣ (Optional) Scale with Kubernetes

- For large deployments, write Kubernetes manifests to deploy and scale each service.
- Use cloud GPU nodes for detection, CPU nodes for analytics/UI.

### 6️⃣ Practical Migration Steps

- Start by containerizing your current monolithic app (single Dockerfile).
- Refactor detection, tracking, analytics into separate modules/services.
- Add REST APIs to each service.
- Use Docker Compose for local multi-service testing.
- Gradually move to cloud or edge as needed.

### 7️⃣ Resources

- [Docker Official Docs](https://docs.docker.com/)
- [FastAPI Docs](https://fastapi.tiangolo.com/)
- [Docker Compose](https://docs.docker.com/compose/)
- [Kubernetes Docs](https://kubernetes.io/docs/)

---

**Summary:**

- Containers replace Conda for environment management and make deployment portable.
- Microservices make your system modular, scalable, and cloud/edge-ready.
- Start with Docker, then add REST APIs, then orchestrate with Docker Compose/Kubernetes.
- This approach prepares your project for production, research, and smart city scale.

16
kernel.errors.txt
Normal file
16
kernel.errors.txt
Normal file
@@ -0,0 +1,16 @@
Instruction / Operand / Region Errors:

/-------------------------------------------!!!KERNEL HEADER ERRORS FOUND!!!-------------------------------------------\
Error in CISA routine with name: kernel
Error Message: Input V38 = [256, 260) intersects with V37 = [256, 260)
\----------------------------------------------------------------------------------------------------------------------/


/-------------------------------------------!!!KERNEL HEADER ERRORS FOUND!!!-------------------------------------------\
Error in CISA routine with name: kernel
Error Message: Explicit input 2 must not follow an implicit input 0
\----------------------------------------------------------------------------------------------------------------------/

101
models/yolo11x_openvino_model/metadata.yaml
Normal file
101
models/yolo11x_openvino_model/metadata.yaml
Normal file
@@ -0,0 +1,101 @@
|
||||
description: Ultralytics YOLO11x model trained on /ultralytics/ultralytics/cfg/datasets/coco.yaml
|
||||
author: Ultralytics
|
||||
date: '2025-06-09T03:51:12.423573'
|
||||
version: 8.3.151
|
||||
license: AGPL-3.0 License (https://ultralytics.com/license)
|
||||
docs: https://docs.ultralytics.com
|
||||
stride: 32
|
||||
task: detect
|
||||
batch: 1
|
||||
imgsz:
|
||||
- 640
|
||||
- 640
|
||||
names:
|
||||
0: person
|
||||
1: bicycle
|
||||
2: car
|
||||
3: motorcycle
|
||||
4: airplane
|
||||
5: bus
|
||||
6: train
|
||||
7: truck
|
||||
8: boat
|
||||
9: traffic light
|
||||
10: fire hydrant
|
||||
11: stop sign
|
||||
12: parking meter
|
||||
13: bench
|
||||
14: bird
|
||||
15: cat
|
||||
16: dog
|
||||
17: horse
|
||||
18: sheep
|
||||
19: cow
|
||||
20: elephant
|
||||
21: bear
|
||||
22: zebra
|
||||
23: giraffe
|
||||
24: backpack
|
||||
25: umbrella
|
||||
26: handbag
|
||||
27: tie
|
||||
28: suitcase
|
||||
29: frisbee
|
||||
30: skis
|
||||
31: snowboard
|
||||
32: sports ball
|
||||
33: kite
|
||||
34: baseball bat
|
||||
35: baseball glove
|
||||
36: skateboard
|
||||
37: surfboard
|
||||
38: tennis racket
|
||||
39: bottle
|
||||
40: wine glass
|
||||
41: cup
|
||||
42: fork
|
||||
43: knife
|
||||
44: spoon
|
||||
45: bowl
|
||||
46: banana
|
||||
47: apple
|
||||
48: sandwich
|
||||
49: orange
|
||||
50: broccoli
|
||||
51: carrot
|
||||
52: hot dog
|
||||
53: pizza
|
||||
54: donut
|
||||
55: cake
|
||||
56: chair
|
||||
57: couch
|
||||
58: potted plant
|
||||
59: bed
|
||||
60: dining table
|
||||
61: toilet
|
||||
62: tv
|
||||
63: laptop
|
||||
64: mouse
|
||||
65: remote
|
||||
66: keyboard
|
||||
67: cell phone
|
||||
68: microwave
|
||||
69: oven
|
||||
70: toaster
|
||||
71: sink
|
||||
72: refrigerator
|
||||
73: book
|
||||
74: clock
|
||||
75: vase
|
||||
76: scissors
|
||||
77: teddy bear
|
||||
78: hair drier
|
||||
79: toothbrush
|
||||
args:
|
||||
batch: 1
|
||||
fraction: 1.0
|
||||
half: true
|
||||
int8: false
|
||||
dynamic: true
|
||||
nms: false
|
||||
channels: 3
|
||||
BIN
models/yolo11x_openvino_model/yolo11x.bin
LFS
Normal file
BIN
models/yolo11x_openvino_model/yolo11x.bin
LFS
Normal file
Binary file not shown.
BIN
models/yolo11x_openvino_model/yolo11x.xml
LFS
Normal file
BIN
models/yolo11x_openvino_model/yolo11x.xml
LFS
Normal file
Binary file not shown.
176
optimize_models.py
Normal file
176
optimize_models.py
Normal file
@@ -0,0 +1,176 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Check and optimize OpenVINO models to FP16 precision.
|
||||
This script checks if the models are using FP16 precision and converts them if needed.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import xml.etree.ElementTree as ET
|
||||
from pathlib import Path
|
||||
|
||||
# Add current directory to path
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
sys.path.append(current_dir)
|
||||
|
||||
def check_model_precision(model_path):
|
||||
"""
|
||||
Check if the model is using FP16 precision.
|
||||
|
||||
Args:
|
||||
model_path: Path to the model XML file
|
||||
|
||||
Returns:
|
||||
Tuple of (is_fp16, num_fp32_layers, num_total_layers)
|
||||
"""
|
||||
if not Path(model_path).exists():
|
||||
print(f"❌ Model file {model_path} not found!")
|
||||
return False, 0, 0
|
||||
|
||||
tree = ET.parse(model_path)
|
||||
root = tree.getroot()
|
||||
|
||||
fp32_layers = 0
|
||||
total_layers = 0
|
||||
|
||||
# Check layers precision
|
||||
for layer in root.findall(".//layer"):
|
||||
total_layers += 1
|
||||
precision = layer.get("precision")
|
||||
if precision == "FP32":
|
||||
fp32_layers += 1
|
||||
|
||||
is_fp16 = fp32_layers == 0
|
||||
|
||||
return is_fp16, fp32_layers, total_layers
|
||||
|
||||
def convert_to_fp16(model_path):
|
||||
"""
|
||||
Convert OpenVINO model to FP16 precision.
|
||||
|
||||
Args:
|
||||
model_path: Path to the model XML file
|
||||
|
||||
Returns:
|
||||
Path to the converted model
|
||||
"""
|
||||
try:
|
||||
from openvino.tools import mo
|
||||
|
||||
print(f"🔄 Converting model to FP16: {model_path}")
|
||||
|
||||
# Get paths
|
||||
xml_path = Path(model_path)
|
||||
bin_path = xml_path.with_suffix('.bin')
|
||||
output_dir = xml_path.parent
|
||||
|
||||
if not xml_path.exists() or not bin_path.exists():
|
||||
print(f"❌ Model files not found: {xml_path} or {bin_path}")
|
||||
return None
|
||||
|
||||
# Run model optimizer to convert to FP16
|
||||
args = [
|
||||
"--input_model", str(xml_path),
|
||||
"--output_dir", str(output_dir),
|
||||
"--data_type", "FP16"
|
||||
]
|
||||
|
||||
print(f"⚙️ Running Model Optimizer with args: {args}")
|
||||
start_time = time.time()
|
||||
mo.main(args)
|
||||
conversion_time = time.time() - start_time
|
||||
|
||||
print(f"✅ Model converted to FP16 in {conversion_time:.2f} seconds")
|
||||
|
||||
return model_path
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error converting model: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return None
|
||||
|
||||
def optimize_model(model_path):
|
||||
"""
|
||||
Check and optimize model to FP16 precision if needed.
|
||||
|
||||
Args:
|
||||
model_path: Path to the model XML file
|
||||
|
||||
Returns:
|
||||
Path to the optimized model
|
||||
"""
|
||||
if not Path(model_path).exists():
|
||||
print(f"❌ Model file {model_path} not found!")
|
||||
return None
|
||||
|
||||
print(f"🔍 Checking model precision: {model_path}")
|
||||
is_fp16, fp32_layers, total_layers = check_model_precision(model_path)
|
||||
|
||||
if is_fp16:
|
||||
print(f"✅ Model is already using FP16 precision: {model_path}")
|
||||
return model_path
|
||||
else:
|
||||
print(f"⚠️ Model using FP32 precision ({fp32_layers}/{total_layers} layers). Converting to FP16...")
|
||||
return convert_to_fp16(model_path)
|
||||
|
||||
def main():
|
||||
"""
|
||||
Check and optimize all OpenVINO models in the workspace.
|
||||
"""
|
||||
print("\n" + "="*80)
|
||||
print("OpenVINO Model Optimizer - FP32 to FP16 Converter")
|
||||
print("="*80)
|
||||
|
||||
# Check for OpenVINO
|
||||
try:
|
||||
import openvino as ov
|
||||
print(f"✅ OpenVINO version: {ov.__version__}")
|
||||
except ImportError:
|
||||
print("⚠️ OpenVINO not installed. Installing now...")
|
||||
os.system('pip install --quiet "openvino>=2024.0.0"')
|
||||
import openvino as ov
|
||||
print(f"✅ OpenVINO installed: {ov.__version__}")
|
||||
|
||||
# Find OpenVINO models
|
||||
search_dirs = [
|
||||
".",
|
||||
"openvino_models",
|
||||
"models",
|
||||
"../openvino_models"
|
||||
]
|
||||
|
||||
print("🔍 Searching for OpenVINO models...")
|
||||
|
||||
models_found = []
|
||||
for search_dir in search_dirs:
|
||||
search_path = Path(search_dir)
|
||||
if not search_path.exists():
|
||||
continue
|
||||
|
||||
# Find XML files
|
||||
for xml_file in search_path.glob("**/*.xml"):
|
||||
if "openvino" in str(xml_file).lower() or "yolo" in str(xml_file).lower():
|
||||
models_found.append(xml_file)
|
||||
|
||||
if not models_found:
|
||||
print("❌ No OpenVINO models found!")
|
||||
return
|
||||
|
||||
print(f"✅ Found {len(models_found)} OpenVINO models:")
|
||||
for i, model_path in enumerate(models_found):
|
||||
print(f" {i+1}. {model_path}")
|
||||
|
||||
# Process each model
|
||||
optimized_models = []
|
||||
for model_path in models_found:
|
||||
optimized_path = optimize_model(model_path)
|
||||
if optimized_path:
|
||||
optimized_models.append(optimized_path)
|
||||
|
||||
print(f"\n✅ Optimized {len(optimized_models)} models")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
43
qt_app.spec
Normal file
43
qt_app.spec
Normal file
@@ -0,0 +1,43 @@
|
||||
# -*- mode: python ; coding: utf-8 -*-
|
||||
|
||||
block_cipher = None
|
||||
|
||||
a = Analysis(
|
||||
[r'D:\Downloads\finale6\khatam\qt_app_pyside\main.py'],
|
||||
pathex=['D:\Downloads\finale6\khatam'],
|
||||
binaries=[],
|
||||
datas=[(r'qt_app_pyside\\resources', r'qt_app_pyside\\resources'), (r'models/yolo11x_openvino_model', r'models/yolo11x_openvino_model'), (r'openvino_models', r'openvino_models'), (r'yolo11x_openvino_model', r'yolo11x_openvino_model'), (r'qt_app_pyside\\config.json', r'qt_app_pyside')],
|
||||
hiddenimports=['PySide6.QtCore', 'PySide6.QtGui', 'PySide6.QtWidgets'],
|
||||
hookspath=[],
|
||||
hooksconfig={},
|
||||
runtime_hooks=[],
|
||||
excludes=[],
|
||||
win_no_prefer_redirects=False,
|
||||
win_private_assemblies=False,
|
||||
cipher=block_cipher,
|
||||
noarchive=False,
|
||||
)
|
||||
|
||||
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
|
||||
|
||||
exe = EXE(
|
||||
pyz,
|
||||
a.scripts,
|
||||
a.binaries,
|
||||
a.zipfiles,
|
||||
a.datas,
|
||||
[], name='traffic_monitoring_app',
|
||||
debug=False,
|
||||
bootloader_ignore_signals=False,
|
||||
strip=False,
|
||||
upx=True,
|
||||
upx_exclude=[],
|
||||
runtime_tmpdir=None,
|
||||
console=False,
|
||||
disable_windowed_traceback=False,
|
||||
argv_emulation=False,
|
||||
target_arch=None,
|
||||
codesign_identity=None,
|
||||
entitlements_file=None,
|
||||
)
|
||||
|
||||
10
qt_app_pyside1/.dockerignore
Normal file
10
qt_app_pyside1/.dockerignore
Normal file
@@ -0,0 +1,10 @@
|
||||
__pycache__/
|
||||
*.pyc
|
||||
*.pyo
|
||||
.vscode/
|
||||
.env
|
||||
.git/
|
||||
logs/
|
||||
dist/
|
||||
build/
|
||||
*.spec
|
||||
BIN
qt_app_pyside1/Checkpoints/best_deeplabv3plus_mobilenet_cityscapes_os16.pth
LFS
Normal file
BIN
qt_app_pyside1/Checkpoints/best_deeplabv3plus_mobilenet_cityscapes_os16.pth
LFS
Normal file
Binary file not shown.
38
qt_app_pyside1/Dockerfile
Normal file
38
qt_app_pyside1/Dockerfile
Normal file
@@ -0,0 +1,38 @@
|
||||
# Dockerfile for qt_app_pyside1 (optimized)
|
||||
FROM python:3.10-slim
|
||||
|
||||
# Install system dependencies for OpenCV, PySide6, OpenVINO, X11 GUI, and supervisor
|
||||
RUN apt-get update && apt-get install -y \
|
||||
ffmpeg \
|
||||
libgl1 \
|
||||
libegl1 \
|
||||
libglib2.0-0 \
|
||||
libsm6 \
|
||||
libxrender1 \
|
||||
libxext6 \
|
||||
xvfb \
|
||||
x11-apps \
|
||||
supervisor \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy requirements and install dependencies first for caching
|
||||
COPY requirements_enhanced.txt ./requirements_enhanced.txt
|
||||
RUN pip install --no-cache-dir -r requirements_enhanced.txt
|
||||
|
||||
# Copy all source code and models
|
||||
COPY . .
|
||||
|
||||
# Copy supervisor config
|
||||
COPY supervisord.conf /etc/supervisord.conf
|
||||
|
||||
# Make start.sh executable
|
||||
RUN chmod +x start.sh
|
||||
|
||||
# Expose display for X11 and logs
|
||||
ENV DISPLAY=:99
|
||||
VOLUME ["/app/logs"]
|
||||
|
||||
# Use supervisor to run Xvfb and app together, with logging
|
||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]
|
||||
38
qt_app_pyside1/FixedDebug.spec
Normal file
38
qt_app_pyside1/FixedDebug.spec
Normal file
@@ -0,0 +1,38 @@
|
||||
# -*- mode: python ; coding: utf-8 -*-
|
||||
|
||||
|
||||
a = Analysis(
|
||||
['main.py'],
|
||||
pathex=[],
|
||||
binaries=[],
|
||||
datas=[('ui', 'ui'), ('controllers', 'controllers'), ('utils', 'utils'), ('config.json', '.'), ('splash.py', '.')],
|
||||
hiddenimports=['ui', 'ui.main_window', 'controllers', 'utils', 'cv2', 'openvino', 'numpy', 'PySide6.QtCore', 'PySide6.QtWidgets', 'PySide6.QtGui'],
|
||||
hookspath=[],
|
||||
hooksconfig={},
|
||||
runtime_hooks=[],
|
||||
excludes=[],
|
||||
noarchive=False,
|
||||
optimize=0,
|
||||
)
|
||||
pyz = PYZ(a.pure)
|
||||
|
||||
exe = EXE(
|
||||
pyz,
|
||||
a.scripts,
|
||||
a.binaries,
|
||||
a.datas,
|
||||
[],
|
||||
name='FixedDebug',
|
||||
debug=False,
|
||||
bootloader_ignore_signals=False,
|
||||
strip=False,
|
||||
upx=True,
|
||||
upx_exclude=[],
|
||||
runtime_tmpdir=None,
|
||||
console=True,
|
||||
disable_windowed_traceback=False,
|
||||
argv_emulation=False,
|
||||
target_arch=None,
|
||||
codesign_identity=None,
|
||||
entitlements_file=None,
|
||||
)
|
||||
36
qt_app_pyside1/QUICK_ACTION_PLAN.txt
Normal file
36
qt_app_pyside1/QUICK_ACTION_PLAN.txt
Normal file
@@ -0,0 +1,36 @@
|
||||
"""
|
||||
🚀 QUICK ACTION PLAN - Fix PyInstaller Build Issues
|
||||
==================================================
|
||||
|
||||
WHAT I'VE DONE FOR YOU:
|
||||
✅ Created missing __init__.py files in ui/ and controllers/
|
||||
✅ Created build_exe_optimized.py with ALL fixes
|
||||
✅ Analyzed your build log and identified all critical errors
|
||||
|
||||
IMMEDIATE NEXT STEPS:
|
||||
1. Run the optimized build script:
|
||||
python build_exe_optimized.py
|
||||
|
||||
2. If build succeeds, test the executable:
|
||||
dist\TrafficMonitoringApp.exe
|
||||
|
||||
KEY FIXES APPLIED:
|
||||
- Missing __init__.py files (CRITICAL ERROR FIX)
|
||||
- Complete hidden import coverage for cv2, numpy, openvino, etc.
|
||||
- Excluded heavy unused modules (50MB+ size reduction)
|
||||
- Proper data file inclusion
|
||||
- Windows-specific optimizations
|
||||
|
||||
WHAT TO EXPECT:
|
||||
- Build should complete successfully now
|
||||
- Executable size ~200MB (down from 300MB+)
|
||||
- All UI components should load
|
||||
- Video processing should work
|
||||
- Configuration loading should work
|
||||
|
||||
IF ISSUES PERSIST:
|
||||
1. Check Python version (3.8-3.11 recommended)
|
||||
2. Verify all packages installed: pip install -r requirements.txt
|
||||
3. Clear cache: python -m pip cache purge
|
||||
4. Run in clean virtual environment
|
||||
"""
|
||||
38
qt_app_pyside1/QuickDebug.spec
Normal file
38
qt_app_pyside1/QuickDebug.spec
Normal file
@@ -0,0 +1,38 @@
|
||||
# -*- mode: python ; coding: utf-8 -*-
|
||||
|
||||
|
||||
a = Analysis(
|
||||
['main.py'],
|
||||
pathex=[],
|
||||
binaries=[],
|
||||
datas=[],
|
||||
hiddenimports=[],
|
||||
hookspath=[],
|
||||
hooksconfig={},
|
||||
runtime_hooks=[],
|
||||
excludes=[],
|
||||
noarchive=False,
|
||||
optimize=0,
|
||||
)
|
||||
pyz = PYZ(a.pure)
|
||||
|
||||
exe = EXE(
|
||||
pyz,
|
||||
a.scripts,
|
||||
a.binaries,
|
||||
a.datas,
|
||||
[],
|
||||
name='QuickDebug',
|
||||
debug=False,
|
||||
bootloader_ignore_signals=False,
|
||||
strip=False,
|
||||
upx=True,
|
||||
upx_exclude=[],
|
||||
runtime_tmpdir=None,
|
||||
console=True,
|
||||
disable_windowed_traceback=False,
|
||||
argv_emulation=False,
|
||||
target_arch=None,
|
||||
codesign_identity=None,
|
||||
entitlements_file=None,
|
||||
)
|
||||
74
qt_app_pyside1/README.md
Normal file
74
qt_app_pyside1/README.md
Normal file
@@ -0,0 +1,74 @@
|
||||
# PySide6 Traffic Monitoring Dashboard (Advanced)
|
||||
|
||||
## Features
|
||||
|
||||
- Real-time video detection (OpenVINO, YOLO)
|
||||
- Drag-and-drop video/image, webcam, RTSP
|
||||
- Live overlays (bounding boxes, labels, violations)
|
||||
- Analytics: trends, histograms, summary cards
|
||||
- Violations: searchable, filterable, snapshot preview
|
||||
- Export: CSV/JSON, config editor, reload/apply
|
||||
- Sidebar: device, thresholds, toggles, dark/light mode
|
||||
- Performance overlay: CPU, RAM, FPS, backend
|
||||
- Modern UI: QSS, icons, rounded corners, animations
|
||||
|
||||
## Structure
|
||||
|
||||
```
|
||||
qt_app_pyside/
|
||||
├── main.py
|
||||
├── ui/
|
||||
│ ├── main_window.py
|
||||
│ ├── live_tab.py
|
||||
│ ├── analytics_tab.py
|
||||
│ ├── violations_tab.py
|
||||
│ ├── export_tab.py
|
||||
│ └── config_panel.py
|
||||
├── controllers/
|
||||
│ ├── video_controller.py
|
||||
│ ├── analytics_controller.py
|
||||
│ └── performance_overlay.py
|
||||
├── utils/
|
||||
│ ├── helpers.py
|
||||
│ └── annotation_utils.py
|
||||
├── resources/
|
||||
│ ├── icons/
|
||||
│ ├── style.qss
|
||||
│ └── themes/
|
||||
│ ├── dark.qss
|
||||
│ └── light.qss
|
||||
├── config.json
|
||||
├── requirements.txt
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
1. Install requirements: `pip install -r requirements.txt`
|
||||
|
||||
2. Run the application (several options):
|
||||
- **Recommended**: Use the enhanced controller: `python run_app.py`
|
||||
- Standard mode: `python main.py`
|
||||
|
||||
## Enhanced Features
|
||||
|
||||
The application now includes an enhanced video controller that is automatically activated at startup:
|
||||
|
||||
- ✅ **Async Inference Pipeline**: Better frame rate and responsiveness
|
||||
- ✅ **FP16 Precision**: Optimized for CPU performance
|
||||
- ✅ **Separate FPS Tracking**: UI and detection metrics are tracked separately
|
||||
- ✅ **Auto Model Selection**: Uses optimal model based on device (yolo11n for CPU, yolo11x for GPU)
|
||||
- ✅ **OpenVINO Embedder**: Optimized DeepSORT tracking with OpenVINO backend
|
||||
|
||||
## Integration
|
||||
|
||||
- Plug in your detection logic from `detection_openvino.py` and `violation_openvino.py` in the controllers.
|
||||
- Use `config.json` for all parameters.
|
||||
- Extend UI/controllers for advanced analytics, export, and overlays.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you encounter import errors:
|
||||
|
||||
- Try running with `python run_app.py` which handles import paths automatically
|
||||
- Ensure you have all required dependencies installed
|
||||
- Check that the correct model files exist in the openvino_models directory
|
||||
38
qt_app_pyside1/TrafficMonitor.spec
Normal file
38
qt_app_pyside1/TrafficMonitor.spec
Normal file
@@ -0,0 +1,38 @@
|
||||
# -*- mode: python ; coding: utf-8 -*-
|
||||
|
||||
|
||||
a = Analysis(
|
||||
['main.py'],
|
||||
pathex=[],
|
||||
binaries=[],
|
||||
datas=[('ui', 'ui'), ('controllers', 'controllers'), ('utils', 'utils'), ('openvino_models', 'openvino_models'), ('resources', 'resources'), ('config.json', '.'), ('splash.py', '.')],
|
||||
hiddenimports=['cv2', 'openvino', 'numpy', 'PySide6.QtCore', 'PySide6.QtWidgets', 'PySide6.QtGui', 'json', 'os', 'sys', 'time', 'traceback', 'pathlib'],
|
||||
hookspath=[],
|
||||
hooksconfig={},
|
||||
runtime_hooks=[],
|
||||
excludes=[],
|
||||
noarchive=False,
|
||||
optimize=0,
|
||||
)
|
||||
pyz = PYZ(a.pure)
|
||||
|
||||
exe = EXE(
|
||||
pyz,
|
||||
a.scripts,
|
||||
a.binaries,
|
||||
a.datas,
|
||||
[],
|
||||
name='TrafficMonitor',
|
||||
debug=False,
|
||||
bootloader_ignore_signals=False,
|
||||
strip=False,
|
||||
upx=True,
|
||||
upx_exclude=[],
|
||||
runtime_tmpdir=None,
|
||||
console=False,
|
||||
disable_windowed_traceback=False,
|
||||
argv_emulation=False,
|
||||
target_arch=None,
|
||||
codesign_identity=None,
|
||||
entitlements_file=None,
|
||||
)
|
||||
38
qt_app_pyside1/TrafficMonitorDebug.spec
Normal file
38
qt_app_pyside1/TrafficMonitorDebug.spec
Normal file
@@ -0,0 +1,38 @@
|
||||
# -*- mode: python ; coding: utf-8 -*-
|
||||
|
||||
|
||||
a = Analysis(
|
||||
['main.py'],
|
||||
pathex=[],
|
||||
binaries=[],
|
||||
datas=[('ui', 'ui'), ('controllers', 'controllers'), ('utils', 'utils'), ('openvino_models', 'openvino_models'), ('resources', 'resources'), ('config.json', '.'), ('splash.py', '.')],
|
||||
hiddenimports=['cv2', 'openvino', 'numpy', 'PySide6.QtCore', 'PySide6.QtWidgets', 'PySide6.QtGui', 'json', 'os', 'sys', 'time', 'traceback', 'pathlib'],
|
||||
hookspath=[],
|
||||
hooksconfig={},
|
||||
runtime_hooks=[],
|
||||
excludes=[],
|
||||
noarchive=False,
|
||||
optimize=0,
|
||||
)
|
||||
pyz = PYZ(a.pure)
|
||||
|
||||
exe = EXE(
|
||||
pyz,
|
||||
a.scripts,
|
||||
a.binaries,
|
||||
a.datas,
|
||||
[],
|
||||
name='TrafficMonitorDebug',
|
||||
debug=False,
|
||||
bootloader_ignore_signals=False,
|
||||
strip=False,
|
||||
upx=True,
|
||||
upx_exclude=[],
|
||||
runtime_tmpdir=None,
|
||||
console=True,
|
||||
disable_windowed_traceback=False,
|
||||
argv_emulation=False,
|
||||
target_arch=None,
|
||||
codesign_identity=None,
|
||||
entitlements_file=None,
|
||||
)
|
||||
38
qt_app_pyside1/TrafficMonitorFixed.spec
Normal file
38
qt_app_pyside1/TrafficMonitorFixed.spec
Normal file
@@ -0,0 +1,38 @@
|
||||
# -*- mode: python ; coding: utf-8 -*-
|
||||
|
||||
|
||||
a = Analysis(
|
||||
['main.py'],
|
||||
pathex=[],
|
||||
binaries=[],
|
||||
datas=[('ui', 'ui'), ('splash.py', '.'), ('config.json', '.'), ('controllers', 'controllers'), ('utils', 'utils'), ('openvino_models', 'openvino_models')],
|
||||
hiddenimports=['json', 'datetime', 'pathlib', 'os', 'sys', 'time', 'traceback'],
|
||||
hookspath=[],
|
||||
hooksconfig={},
|
||||
runtime_hooks=[],
|
||||
excludes=[],
|
||||
noarchive=False,
|
||||
optimize=0,
|
||||
)
|
||||
pyz = PYZ(a.pure)
|
||||
|
||||
exe = EXE(
|
||||
pyz,
|
||||
a.scripts,
|
||||
a.binaries,
|
||||
a.datas,
|
||||
[],
|
||||
name='TrafficMonitorFixed',
|
||||
debug=False,
|
||||
bootloader_ignore_signals=False,
|
||||
strip=False,
|
||||
upx=True,
|
||||
upx_exclude=[],
|
||||
runtime_tmpdir=None,
|
||||
console=True,
|
||||
disable_windowed_traceback=False,
|
||||
argv_emulation=False,
|
||||
target_arch=None,
|
||||
codesign_identity=None,
|
||||
entitlements_file=None,
|
||||
)
|
||||
0
qt_app_pyside1/__init__.py
Normal file
0
qt_app_pyside1/__init__.py
Normal file
Binary file not shown.
BIN
qt_app_pyside1/__pycache__/splash.cpython-311.pyc
Normal file
BIN
qt_app_pyside1/__pycache__/splash.cpython-311.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/Analysis-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/FixedDebug/Analysis-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/EXE-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/FixedDebug/EXE-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/FixedDebug.pkg
LFS
Normal file
BIN
qt_app_pyside1/build/FixedDebug/FixedDebug.pkg
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/PKG-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/FixedDebug/PKG-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/PYZ-00.pyz
LFS
Normal file
BIN
qt_app_pyside1/build/FixedDebug/PYZ-00.pyz
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/PYZ-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/FixedDebug/PYZ-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/base_library.zip
Normal file
BIN
qt_app_pyside1/build/FixedDebug/base_library.zip
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/localpycs/pyimod01_archive.pyc
Normal file
BIN
qt_app_pyside1/build/FixedDebug/localpycs/pyimod01_archive.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/localpycs/pyimod02_importers.pyc
Normal file
BIN
qt_app_pyside1/build/FixedDebug/localpycs/pyimod02_importers.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/localpycs/pyimod03_ctypes.pyc
Normal file
BIN
qt_app_pyside1/build/FixedDebug/localpycs/pyimod03_ctypes.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/localpycs/pyimod04_pywin32.pyc
Normal file
BIN
qt_app_pyside1/build/FixedDebug/localpycs/pyimod04_pywin32.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/FixedDebug/localpycs/struct.pyc
Normal file
BIN
qt_app_pyside1/build/FixedDebug/localpycs/struct.pyc
Normal file
Binary file not shown.
906
qt_app_pyside1/build/FixedDebug/warn-FixedDebug.txt
Normal file
906
qt_app_pyside1/build/FixedDebug/warn-FixedDebug.txt
Normal file
@@ -0,0 +1,906 @@
|
||||
|
||||
This file lists modules PyInstaller was not able to find. This does not
|
||||
necessarily mean this module is required for running your program. Python and
|
||||
Python 3rd-party packages include a lot of conditional or optional modules. For
|
||||
example the module 'ntpath' only exists on Windows, whereas the module
|
||||
'posixpath' only exists on Posix systems.
|
||||
|
||||
Types if import:
|
||||
* top-level: imported at the top-level - look at these first
|
||||
* conditional: imported within an if-statement
|
||||
* delayed: imported within a function
|
||||
* optional: imported within a try-except-statement
|
||||
|
||||
IMPORTANT: Do NOT post this list to the issue-tracker. Use it as a basis for
|
||||
tracking down the missing module yourself. Thanks!
|
||||
|
||||
missing module named usercustomize - imported by site (delayed, optional)
|
||||
missing module named sitecustomize - imported by site (delayed, optional)
|
||||
missing module named org - imported by copy (optional)
|
||||
missing module named 'org.python' - imported by pickle (optional), xml.sax (delayed, conditional), setuptools.sandbox (conditional)
|
||||
missing module named pwd - imported by posixpath (delayed, conditional, optional), shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional), http.server (delayed, optional), webbrowser (delayed), psutil (optional), netrc (delayed, conditional), getpass (delayed), distutils.util (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), distutils.archive_util (optional), setuptools._distutils.util (delayed, conditional, optional), setuptools._distutils.archive_util (optional)
|
||||
missing module named grp - imported by shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), distutils.archive_util (optional), setuptools._distutils.archive_util (optional)
|
||||
missing module named posix - imported by os (conditional, optional), posixpath (optional), shutil (conditional), importlib._bootstrap_external (conditional)
|
||||
missing module named resource - imported by posix (top-level), fsspec.asyn (conditional, optional), torch._inductor.codecache (delayed, conditional)
|
||||
missing module named _frozen_importlib_external - imported by importlib._bootstrap (delayed), importlib (optional), importlib.abc (optional), zipimport (top-level)
|
||||
excluded module named _frozen_importlib - imported by importlib (optional), importlib.abc (optional), zipimport (top-level)
|
||||
missing module named _posixsubprocess - imported by subprocess (conditional), multiprocessing.util (delayed), joblib.externals.loky.backend.fork_exec (delayed)
|
||||
missing module named fcntl - imported by subprocess (optional), xmlrpc.server (optional), tqdm.utils (delayed, optional), absl.flags._helpers (optional), filelock._unix (conditional, optional), pty (delayed, optional), torch.testing._internal.distributed.distributed_test (conditional)
|
||||
missing module named win32evtlog - imported by logging.handlers (delayed, optional)
|
||||
missing module named win32evtlogutil - imported by logging.handlers (delayed, optional)
|
||||
missing module named startup - imported by pyreadline3.keysyms.common (conditional), pyreadline3.keysyms.keysyms (conditional)
|
||||
missing module named sets - imported by pyreadline3.keysyms.common (optional), pytz.tzinfo (optional)
|
||||
missing module named System - imported by pyreadline3.clipboard.ironpython_clipboard (top-level), pyreadline3.keysyms.ironpython_keysyms (top-level), pyreadline3.console.ironpython_console (top-level), pyreadline3.rlmain (conditional)
|
||||
missing module named console - imported by pyreadline3.console.ansi (conditional)
|
||||
missing module named clr - imported by pyreadline3.clipboard.ironpython_clipboard (top-level), pyreadline3.console.ironpython_console (top-level)
|
||||
missing module named IronPythonConsole - imported by pyreadline3.console.ironpython_console (top-level)
|
||||
missing module named vms_lib - imported by platform (delayed, optional)
|
||||
missing module named 'java.lang' - imported by platform (delayed, optional), xml.sax._exceptions (conditional)
|
||||
missing module named java - imported by platform (delayed)
|
||||
missing module named _winreg - imported by platform (delayed, optional), pygments.formatters.img (optional)
|
||||
missing module named termios - imported by tty (top-level), getpass (optional), tqdm.utils (delayed, optional), absl.flags._helpers (optional), click._termui_impl (conditional)
|
||||
missing module named pyimod02_importers - imported by C:\Users\jatin\.conda\envs\traffic_monitor\Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgutil.py (delayed), C:\Users\jatin\.conda\envs\traffic_monitor\Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgres.py (delayed)
|
||||
missing module named _manylinux - imported by packaging._manylinux (delayed, optional), setuptools._vendor.packaging._manylinux (delayed, optional), wheel.vendored.packaging._manylinux (delayed, optional)
|
||||
missing module named '_typeshed.importlib' - imported by pkg_resources (conditional)
|
||||
missing module named _typeshed - imported by pkg_resources (conditional), setuptools.glob (conditional), setuptools.compat.py311 (conditional), torch.utils._backport_slots (conditional), streamlit.runtime.state.query_params (conditional), git.objects.fun (conditional), streamlit.runtime.state.query_params_proxy (conditional), setuptools._distutils.dist (conditional)
|
||||
missing module named jnius - imported by setuptools._vendor.platformdirs.android (delayed, conditional, optional)
|
||||
missing module named android - imported by setuptools._vendor.platformdirs.android (delayed, conditional, optional)
|
||||
missing module named _posixshmem - imported by multiprocessing.resource_tracker (conditional), multiprocessing.shared_memory (conditional)
|
||||
missing module named multiprocessing.set_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
|
||||
missing module named multiprocessing.get_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
|
||||
missing module named multiprocessing.get_context - imported by multiprocessing (top-level), multiprocessing.pool (top-level), multiprocessing.managers (top-level), multiprocessing.sharedctypes (top-level), joblib.externals.loky.backend.context (top-level)
|
||||
missing module named multiprocessing.TimeoutError - imported by multiprocessing (top-level), multiprocessing.pool (top-level), joblib.parallel (top-level)
|
||||
missing module named _scproxy - imported by urllib.request (conditional)
|
||||
missing module named multiprocessing.BufferTooShort - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
|
||||
missing module named multiprocessing.AuthenticationError - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
|
||||
missing module named multiprocessing.cpu_count - imported by multiprocessing (delayed, conditional, optional), skimage.util.apply_parallel (delayed, conditional, optional)
|
||||
missing module named multiprocessing.Pool - imported by multiprocessing (top-level), torchvision.datasets.kinetics (top-level), scipy._lib._util (delayed, conditional)
|
||||
missing module named multiprocessing.RLock - imported by multiprocessing (delayed, conditional, optional), tqdm.std (delayed, conditional, optional)
|
||||
missing module named asyncio.DefaultEventLoopPolicy - imported by asyncio (delayed, conditional), asyncio.events (delayed, conditional)
|
||||
missing module named 'distutils._modified' - imported by setuptools._distutils.file_util (delayed)
|
||||
missing module named 'distutils._log' - imported by setuptools._distutils.command.bdist_dumb (top-level), setuptools._distutils.command.bdist_rpm (top-level), setuptools._distutils.command.build_clib (top-level), setuptools._distutils.command.build_ext (top-level), setuptools._distutils.command.build_py (top-level), setuptools._distutils.command.build_scripts (top-level), setuptools._distutils.command.clean (top-level), setuptools._distutils.command.config (top-level), setuptools._distutils.command.install (top-level), setuptools._distutils.command.install_scripts (top-level), setuptools._distutils.command.sdist (top-level)
|
||||
missing module named trove_classifiers - imported by setuptools.config._validate_pyproject.formats (optional)
|
||||
missing module named importlib_resources - imported by setuptools._vendor.jaraco.text (optional), tqdm.cli (delayed, conditional, optional), jsonschema_specifications._core (optional)
|
||||
missing module named numpy.arccosh - imported by numpy (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.arcsinh - imported by numpy (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.arctan - imported by numpy (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.tan - imported by numpy (top-level), scipy.signal._spline_filters (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.complex128 - imported by numpy (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.complex64 - imported by numpy (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.greater - imported by numpy (top-level), scipy.optimize._minpack_py (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.power - imported by numpy (top-level), scipy.stats._kde (top-level)
|
||||
missing module named numpy.sinh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.cosh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.tanh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
|
||||
missing module named numpy.expm1 - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
|
||||
missing module named numpy.log1p - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
|
||||
missing module named numpy.ceil - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.log - imported by numpy (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._discrete_distns (top-level), scipy.stats._morestats (top-level), scipy.signal._waveforms (top-level)
|
||||
missing module named numpy.logical_and - imported by numpy (top-level), scipy.stats._distn_infrastructure (top-level)
|
||||
missing module named numpy.sign - imported by numpy (top-level), scipy.linalg._matfuncs (top-level)
|
||||
missing module named numpy.conjugate - imported by numpy (top-level), scipy.linalg._matfuncs (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.logical_not - imported by numpy (top-level), scipy.linalg._matfuncs (top-level)
|
||||
missing module named numpy.single - imported by numpy (top-level), scipy.linalg._decomp_schur (top-level)
|
||||
missing module named numpy.floor - imported by numpy (top-level), scipy.special._basic (top-level), scipy.special._orthogonal (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.arcsin - imported by numpy (top-level), scipy.linalg._decomp_svd (top-level)
|
||||
missing module named numpy.arccos - imported by numpy (top-level), scipy.linalg._decomp_svd (top-level), scipy.special._orthogonal (top-level)
|
||||
missing module named numpy.conj - imported by numpy (top-level), scipy.linalg._decomp (top-level), scipy.io._mmio (top-level)
|
||||
missing module named numpy.inexact - imported by numpy (top-level), scipy.linalg._decomp (top-level), scipy.special._basic (top-level), scipy.optimize._minpack_py (top-level)
|
||||
missing module named _dummy_thread - imported by numpy.core.arrayprint (optional), cffi.lock (conditional, optional), torch._jit_internal (optional)
|
||||
missing module named numpy.core.result_type - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.float_ - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.number - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.object_ - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.max - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.all - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.errstate - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.bool_ - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.inf - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.isnan - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.array2string - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.lib.imag - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.lib.real - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.lib.iscomplexobj - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.signbit - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.isscalar - imported by numpy.core (delayed), numpy.testing._private.utils (delayed), numpy.lib.polynomial (top-level)
|
||||
missing module named win32pdh - imported by numpy.testing._private.utils (delayed, conditional)
|
||||
missing module named numpy.core.array - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.isnat - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.ndarray - imported by numpy.core (top-level), numpy.testing._private.utils (top-level), numpy.lib.utils (top-level)
|
||||
missing module named numpy.core.array_repr - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.arange - imported by numpy.core (top-level), numpy.testing._private.utils (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.empty - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.float32 - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.intp - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.linspace - imported by numpy.core (top-level), numpy.lib.index_tricks (top-level)
|
||||
missing module named numpy.core.iinfo - imported by numpy.core (top-level), numpy.lib.twodim_base (top-level)
|
||||
missing module named numpy.core.transpose - imported by numpy.core (top-level), numpy.lib.function_base (top-level)
|
||||
missing module named numpy._typing._ufunc - imported by numpy._typing (conditional)
|
||||
missing module named numpy.uint - imported by numpy (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level)
|
||||
missing module named numpy.core.asarray - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.utils (top-level), numpy.fft._pocketfft (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.integer - imported by numpy.core (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.sqrt - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.conjugate - imported by numpy.core (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.swapaxes - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.zeros - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.reciprocal - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.sort - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.argsort - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.sign - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.count_nonzero - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.divide - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.matmul - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.asanyarray - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.atleast_2d - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.prod - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.amax - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.amin - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.moveaxis - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.geterrobj - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.finfo - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.isfinite - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.sum - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.multiply - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.add - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.dot - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.Inf - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.newaxis - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.complexfloating - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.inexact - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.cdouble - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.csingle - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.double - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.single - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.intc - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.empty_like - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named pyodide_js - imported by threadpoolctl (delayed, optional)
|
||||
missing module named numpy.core.ufunc - imported by numpy.core (top-level), numpy.lib.utils (top-level)
|
||||
missing module named numpy.core.ones - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.hstack - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.atleast_1d - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.atleast_3d - imported by numpy.core (top-level), numpy.lib.shape_base (top-level)
|
||||
missing module named numpy.core.vstack - imported by numpy.core (top-level), numpy.lib.shape_base (top-level)
|
||||
missing module named pickle5 - imported by numpy.compat.py3k (optional)
|
||||
missing module named numpy.eye - imported by numpy (delayed), numpy.core.numeric (delayed), scipy.optimize._optimize (top-level), scipy.linalg._decomp (top-level), scipy.interpolate._pade (top-level), scipy.signal._lti_conversion (top-level)
|
||||
missing module named numpy.recarray - imported by numpy (top-level), numpy.lib.recfunctions (top-level), numpy.ma.mrecords (top-level)
|
||||
missing module named numpy.expand_dims - imported by numpy (top-level), numpy.ma.core (top-level)
|
||||
missing module named numpy.array - imported by numpy (top-level), numpy.ma.core (top-level), numpy.ma.extras (top-level), numpy.ma.mrecords (top-level), scipy.linalg._decomp (top-level), scipy.linalg._decomp_schur (top-level), scipy.sparse.linalg._isolve.utils (top-level), scipy.stats._stats_py (top-level), scipy.interpolate._interpolate (top-level), scipy.interpolate._fitpack_impl (top-level), scipy.interpolate._fitpack2 (top-level), scipy.integrate._ode (top-level), scipy._lib._finite_differences (top-level), scipy.stats._morestats (top-level), scipy.optimize._lbfgsb_py (top-level), scipy.optimize._tnc (top-level), scipy.optimize._slsqp_py (top-level), dill._objects (optional), scipy.io._netcdf (top-level), scipy.signal._spline_filters (top-level), scipy.signal._filter_design (top-level), scipy.signal._lti_conversion (top-level)
|
||||
missing module named numpy.iscomplexobj - imported by numpy (top-level), numpy.ma.core (top-level), scipy.linalg._decomp (top-level), scipy.linalg._decomp_ldl (top-level)
|
||||
missing module named numpy.amin - imported by numpy (top-level), numpy.ma.core (top-level), scipy.stats._morestats (top-level)
|
||||
missing module named numpy.amax - imported by numpy (top-level), numpy.ma.core (top-level), scipy.linalg._matfuncs (top-level), scipy.stats._morestats (top-level)
|
||||
missing module named numpy.isinf - imported by numpy (top-level), numpy.testing._private.utils (top-level), scipy.stats._distn_infrastructure (top-level)
|
||||
missing module named numpy.isnan - imported by numpy (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.isfinite - imported by numpy (top-level), numpy.testing._private.utils (top-level), scipy.linalg._decomp (top-level), scipy.linalg._matfuncs (top-level), scipy.optimize._slsqp_py (top-level)
|
||||
missing module named numpy.float64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy.stats._mstats_extras (top-level), scipy.optimize._lbfgsb_py (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.float32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), scipy.signal._spline_filters (top-level)
missing module named numpy.uint64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.uint32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.uint16 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.uint8 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.int64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.int32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), dill._objects (optional), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.int16 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.int8 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.bytes_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.str_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.void - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.object_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.datetime64 - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.timedelta64 - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.number - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.complexfloating - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.floating - imported by numpy (top-level), numpy._typing._array_like (top-level), torch._dynamo.variables.misc (optional)
missing module named numpy.integer - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ctypeslib (top-level)
missing module named numpy.unsignedinteger - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.bool_ - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ma.core (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.generic - imported by numpy (top-level), numpy._typing._array_like (top-level), torch._dynamo.variables.misc (optional)
missing module named numpy.dtype - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.array_api._typing (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), numpy.ctypeslib (top-level), scipy.optimize._minpack_py (top-level), dill._dill (delayed), scipy.io._netcdf (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), torch._dynamo.variables.misc (optional), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level)
missing module named numpy.ndarray - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ma.core (top-level), numpy.ma.extras (top-level), numpy.lib.recfunctions (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), numpy.ctypeslib (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._mstats_basic (top-level), scipy.stats._mstats_extras (top-level), pandas.compat.numpy.function (top-level), dill._dill (delayed), scipy.io._mmio (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), imageio.typing (optional)
missing module named numpy.ufunc - imported by numpy (top-level), numpy._typing (top-level), numpy.testing.overrides (top-level), dill._dill (delayed), dill._objects (optional), skimage._vendored.numpy_lookfor (top-level)
missing module named numpy.histogramdd - imported by numpy (delayed), numpy.lib.twodim_base (delayed)
missing module named numpy._distributor_init_local - imported by numpy (optional), numpy._distributor_init (optional)
missing module named openvino_tokenizers - imported by openvino.tools.ovc.utils (delayed, optional)
missing module named StringIO - imported by six (conditional)
missing module named six.moves.zip - imported by six.moves (top-level), pasta.base.annotate (top-level)
runtime module named six.moves - imported by dateutil.tz.tz (top-level), dateutil.tz._factories (top-level), dateutil.tz.win (top-level), dateutil.rrule (top-level), astunparse (top-level), tensorflow.python.distribute.coordinator.cluster_coordinator (top-level), six.moves.urllib (top-level), tensorflow.python.distribute.multi_process_runner (top-level), pasta.base.annotate (top-level)
missing module named six.moves.cStringIO - imported by six.moves (top-level), astunparse (top-level)
missing module named six.moves.range - imported by six.moves (top-level), dateutil.rrule (top-level)
missing module named rules_python - imported by tensorflow.python.platform.resource_loader (optional)
missing module named google.protobuf.pyext._message - imported by google.protobuf.pyext (conditional, optional), google.protobuf.internal.api_implementation (conditional, optional), google.protobuf.descriptor (conditional), google.protobuf.pyext.cpp_message (conditional)
missing module named google.protobuf.enable_deterministic_proto_serialization - imported by google.protobuf (optional), google.protobuf.internal.api_implementation (optional)
missing module named google.protobuf.internal._api_implementation - imported by google.protobuf.internal (optional), google.protobuf.internal.api_implementation (optional)
missing module named astn - imported by gast.ast2 (top-level)
missing module named theano - imported by opt_einsum.backends.theano (delayed)
missing module named jax - imported by scipy._lib.array_api_compat.common._helpers (delayed), optree.integrations.jax (top-level), opt_einsum.backends.jax (delayed, conditional), keras.src.trainers.data_adapters.data_adapter_utils (delayed), keras.src.backend.jax.core (top-level), keras.src.backend.jax.distribution_lib (top-level), keras.src.backend.jax.image (top-level), keras.src.backend.jax.linalg (top-level), keras.src.backend.jax.math (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.random (top-level), keras.src.backend.jax.rnn (top-level), keras.src.backend.jax.trainer (top-level), keras.src.backend.numpy.nn (top-level), keras.src.backend.jax.export (delayed), tensorflow.lite.python.util (optional), sklearn.externals.array_api_compat.common._helpers (delayed), sklearn.externals.array_api_extra._lib._lazy (delayed, conditional), openvino.frontend.jax.utils (top-level), openvino.frontend.jax.jaxpr_decoder (top-level), openvino.tools.ovc.convert_impl (delayed, conditional), keras.src.backend.jax.optimizer (top-level), keras.src.ops.nn (delayed, conditional), scipy._lib._array_api (delayed, conditional)
missing module named cupy - imported by scipy._lib.array_api_compat.common._helpers (delayed, conditional), opt_einsum.backends.cupy (delayed), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), sklearn.externals.array_api_compat.cupy (top-level), sklearn.externals.array_api_compat.cupy._aliases (top-level), sklearn.externals.array_api_compat.cupy._info (top-level), sklearn.externals.array_api_compat.cupy._typing (top-level), sklearn.utils._testing (delayed, conditional), scipy._lib.array_api_compat.cupy (top-level), scipy._lib.array_api_compat.cupy._aliases (top-level), scipy._lib.array_api_compat.cupy._info (top-level), scipy._lib.array_api_compat.cupy._typing (top-level), scipy._lib._array_api (delayed, conditional), narwhals._pandas_like.series (delayed, conditional), sklearn.externals.array_api_compat.cupy.fft (top-level), sklearn.externals.array_api_compat.cupy.linalg (top-level)
missing module named simplejson - imported by requests.compat (conditional, optional), huggingface_hub.utils._fixes (optional)
missing module named dummy_threading - imported by requests.cookies (optional), joblib.compressor (optional)
missing module named 'h2.events' - imported by urllib3.http2.connection (top-level)
missing module named 'h2.connection' - imported by urllib3.http2.connection (top-level)
missing module named h2 - imported by urllib3.http2.connection (top-level)
missing module named zstandard - imported by urllib3.util.request (optional), urllib3.response (optional), fsspec.compression (optional)
missing module named brotlicffi - imported by urllib3.util.request (optional), urllib3.response (optional), aiohttp.compression_utils (optional)
missing module named collections.Callable - imported by collections (optional), cffi.api (optional), socks (optional)
missing module named bcrypt - imported by cryptography.hazmat.primitives.serialization.ssh (optional)
missing module named cryptography.x509.UnsupportedExtension - imported by cryptography.x509 (optional), urllib3.contrib.pyopenssl (optional)
missing module named chardet - imported by requests (optional), pygments.lexer (delayed, conditional, optional)
missing module named 'pyodide.ffi' - imported by urllib3.contrib.emscripten.fetch (delayed, optional)
missing module named pyodide - imported by urllib3.contrib.emscripten.fetch (top-level)
missing module named js - imported by urllib3.contrib.emscripten.fetch (top-level), fsspec.implementations.http_sync (delayed, optional)
missing module named oauth2client - imported by tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver (optional), tensorflow.python.tpu.client.client (optional)
missing module named googleapiclient - imported by tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver (optional), tensorflow.python.tpu.client.client (optional)
missing module named cloud_tpu_client - imported by tensorflow.python.distribute.cluster_resolver.tpu.tpu_cluster_resolver (optional)
missing module named kubernetes - imported by tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver (delayed, conditional, optional)
missing module named distributed - imported by fsspec.transaction (delayed), joblib._dask (optional), joblib._parallel_backends (delayed, optional)
missing module named 'sphinx.ext' - imported by pyarrow.vendored.docscrape (delayed, conditional)
missing module named dateutil.tz.tzfile - imported by dateutil.tz (top-level), dateutil.zoneinfo (top-level)
missing module named pytest - imported by scipy._lib._testutils (delayed), sympy.testing.runtests_pytest (optional), torch.testing._internal.common_utils (delayed, conditional, optional), h5py.tests (delayed, optional), networkx.classes.backends (conditional, optional), sklearn.utils._testing (optional), torch.testing._internal.optests.generate_tests (delayed, conditional), pandas._testing._io (delayed), pandas._testing (delayed), skimage._shared.tester (delayed), fsspec.conftest (top-level), pyarrow.conftest (top-level), pyarrow.tests.util (top-level), torch._numpy.testing.utils (delayed), skimage.filters.rank.tests.test_rank (top-level), skimage.data._fetchers (delayed, conditional), skimage._shared.testing (top-level)
missing module named 'cupy_backends.cuda' - imported by scipy._lib.array_api_compat.common._helpers (delayed)
missing module named 'cupy.cuda' - imported by sklearn.externals.array_api_compat.cupy._typing (top-level), sklearn.externals.array_api_compat.common._helpers (delayed), scipy._lib.array_api_compat.cupy._typing (top-level), scipy._lib.array_api_compat.common._helpers (delayed)
missing module named 'jax.experimental' - imported by keras.src.trainers.data_adapters.data_adapter_utils (delayed), keras.src.testing.test_case (delayed, conditional), keras.src.backend.jax.core (top-level), keras.src.backend.jax.distribution_lib (top-level), keras.src.backend.jax.numpy (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.sparse (top-level), keras.src.backend.jax.export (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), scipy._lib.array_api_compat.common._helpers (delayed, conditional)
missing module named 'jax.numpy' - imported by optree.integrations.jax (top-level), keras.src.backend.jax.core (top-level), keras.src.backend.jax.image (top-level), keras.src.backend.jax.linalg (top-level), keras.src.backend.jax.math (top-level), keras.src.backend.jax.numpy (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.sparse (top-level), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), openvino.frontend.jax.utils (top-level), scipy._lib.array_api_compat.common._helpers (delayed, conditional)
missing module named sparse - imported by scipy.sparse.linalg._expm_multiply (delayed, conditional), scipy.sparse.linalg._matfuncs (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), scipy._lib.array_api_compat.common._helpers (delayed, conditional)
missing module named 'dask.array' - imported by sklearn.externals.array_api_compat.common._helpers (delayed, conditional), sklearn.externals.array_api_compat.dask.array (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.common._helpers (delayed, conditional), scipy._lib.array_api_compat.dask.array (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), narwhals._dask.expr (delayed), skimage.util.apply_parallel (delayed, optional), sklearn.externals.array_api_compat.dask.array.fft (top-level), sklearn.externals.array_api_compat.dask.array.linalg (top-level)
missing module named ndonnx - imported by sklearn.externals.array_api_compat.common._helpers (delayed), scipy._lib.array_api_compat.common._helpers (delayed)
missing module named 'numpy.lib.array_utils' - imported by joblib._memmapping_reducer (delayed, optional), sklearn.externals.array_api_compat.common._linalg (conditional), scipy._lib.array_api_compat.common._linalg (conditional)
missing module named 'numpy.linalg._linalg' - imported by sklearn.externals.array_api_compat.numpy.linalg (delayed, optional), scipy._lib.array_api_compat.numpy.linalg (delayed, optional)
missing module named Cython - imported by scipy._lib._testutils (optional)
missing module named cython - imported by scipy._lib._testutils (optional), pyarrow.conftest (optional)
missing module named sphinx - imported by scipy._lib._docscrape (delayed, conditional)
missing module named cupyx - imported by scipy._lib._array_api (delayed, conditional)
missing module named scipy.sparse.issparse - imported by scipy.sparse (top-level), scipy.sparse.linalg._interface (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.sparse.linalg._norm (top-level), scipy.integrate._ivp.bdf (top-level), scipy.optimize._numdiff (top-level), scipy.integrate._ivp.radau (top-level), scipy.sparse.csgraph._laplacian (top-level), scipy.optimize._constraints (top-level), scipy.optimize._trustregion_constr.projections (top-level), scipy.optimize._lsq.least_squares (top-level), scipy.optimize._lsq.common (top-level), scipy.optimize._lsq.lsq_linear (top-level), scipy.optimize._linprog_highs (top-level), scipy.optimize._differentialevolution (top-level), scipy.optimize._milp (top-level), scipy.io.matlab._mio (delayed, conditional), scipy.io._fast_matrix_market (top-level), scipy.io._mmio (top-level), tensorflow.python.keras.engine.data_adapter (delayed, optional), tensorflow.python.keras.engine.training_arrays_v1 (optional), tensorflow.python.keras.engine.training_v1 (optional), sklearn.utils._param_validation (top-level), sklearn.externals._scipy.sparse.csgraph._laplacian (top-level), sklearn.utils._set_output (top-level), sklearn.utils.multiclass (top-level), sklearn.metrics.cluster._unsupervised (top-level), sklearn.metrics.pairwise (top-level), sklearn.metrics._pairwise_distances_reduction._dispatcher (top-level), sklearn.cluster._feature_agglomeration (top-level), sklearn.cluster._bicluster (top-level), sklearn.neighbors._base (top-level), sklearn.decomposition._pca (top-level), sklearn.cluster._hdbscan.hdbscan (top-level), sklearn.cluster._optics (top-level), sklearn.manifold._isomap (top-level), sklearn.manifold._t_sne (top-level), sklearn.metrics._classification (top-level), sklearn.metrics._ranking (top-level), sklearn.utils._indexing (top-level), scipy._lib._array_api (delayed), pandas.core.dtypes.common (delayed, conditional, optional), sklearn.tree._classes (top-level), scipy.sparse.csgraph._validation (top-level)
missing module named scipy.linalg._fblas_64 - imported by scipy.linalg (optional), scipy.linalg.blas (optional)
missing module named scipy.linalg._cblas - imported by scipy.linalg (optional), scipy.linalg.blas (optional)
missing module named scipy.linalg._flapack_64 - imported by scipy.linalg (optional), scipy.linalg.lapack (optional)
missing module named scipy.linalg._clapack - imported by scipy.linalg (optional), scipy.linalg.lapack (optional)
missing module named scipy.special.elliprg - imported by scipy.special (top-level), skimage.draw.draw3d (top-level)
missing module named scipy.special.inv_boxcox - imported by scipy.special (top-level), sklearn.preprocessing._data (top-level)
missing module named scipy.special.boxcox - imported by scipy.special (top-level), sklearn.preprocessing._data (top-level)
missing module named scipy.special.sph_jn - imported by scipy.special (delayed, conditional, optional), sympy.functions.special.bessel (delayed, conditional, optional)
missing module named scipy.special.gammaincinv - imported by scipy.special (top-level), scipy.stats._qmvnt (top-level)
missing module named scipy.special.ive - imported by scipy.special (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.special.betaln - imported by scipy.special (top-level), scipy.stats._discrete_distns (top-level), scipy.stats._multivariate (top-level), sklearn.mixture._bayesian_mixture (top-level)
missing module named scipy.special.beta - imported by scipy.special (top-level), scipy.stats._tukeylambda_stats (top-level)
missing module named scipy.special.loggamma - imported by scipy.special (top-level), scipy.fft._fftlog_backend (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.interpolate.PPoly - imported by scipy.interpolate (top-level), scipy.interpolate._cubic (top-level), scipy.spatial.transform._rotation_spline (delayed), scipy.integrate._bvp (delayed)
missing module named _curses - imported by curses (top-level), curses.has_key (top-level)
missing module named olefile - imported by PIL.FpxImagePlugin (top-level), PIL.MicImagePlugin (top-level)
missing module named xmlrpclib - imported by defusedxml.xmlrpc (conditional)
missing module named railroad - imported by pyparsing.diagram (top-level)
missing module named pyparsing.Word - imported by pyparsing (delayed), pyparsing.unicode (delayed), pydot.dot_parser (top-level)
missing module named gi - imported by matplotlib.cbook (delayed, conditional)
missing module named 'scikits.umfpack' - imported by scipy.optimize._linprog_ip (optional)
missing module named 'sksparse.cholmod' - imported by scipy.optimize._linprog_ip (optional)
missing module named sksparse - imported by scipy.optimize._trustregion_constr.projections (optional), scipy.optimize._linprog_ip (optional)
missing module named scipy.optimize.root_scalar - imported by scipy.optimize (top-level), scipy.stats._continuous_distns (top-level), scipy.stats._stats_py (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.optimize.brentq - imported by scipy.optimize (delayed), scipy.integrate._ivp.ivp (delayed), scipy.stats._binomtest (top-level), scipy.stats._odds_ratio (top-level)
missing module named scipy.optimize.OptimizeResult - imported by scipy.optimize (top-level), scipy.integrate._bvp (top-level), scipy.integrate._ivp.ivp (top-level), scipy._lib.cobyqa.main (top-level), scipy._lib.cobyqa.problem (top-level), scipy.optimize._lsq.least_squares (top-level), scipy.optimize._lsq.trf (top-level), scipy.optimize._lsq.dogbox (top-level), scipy.optimize._lsq.lsq_linear (top-level), scipy.optimize._lsq.trf_linear (top-level), scipy.optimize._lsq.bvls (top-level), scipy.optimize._spectral (top-level), scipy.optimize._differentialevolution (top-level), scipy.optimize._shgo (top-level), scipy.optimize._dual_annealing (top-level), scipy.optimize._qap (top-level), scipy.optimize._direct_py (top-level)
missing module named scipy.optimize.minimize_scalar - imported by scipy.optimize (top-level), scipy.interpolate._bsplines (top-level), scipy.stats._multicomp (top-level)
missing module named scipy.special.airy - imported by scipy.special (top-level), scipy.special._orthogonal (top-level)
missing module named scipy.linalg.orthogonal_procrustes - imported by scipy.linalg (top-level), scipy.spatial._procrustes (top-level)
missing module named scipy.linalg.qr_insert - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level)
missing module named uarray - imported by scipy._lib.uarray (conditional, optional)
missing module named scipy.sparse.linalg.matrix_power - imported by scipy.sparse.linalg (delayed), scipy.sparse._matrix (delayed)
missing module named scikits - imported by scipy.sparse.linalg._dsolve.linsolve (optional)
missing module named scipy.sparse.lil_matrix - imported by scipy.sparse (top-level), sklearn.manifold._locally_linear (top-level)
missing module named scipy.sparse.dia_matrix - imported by scipy.sparse (top-level), sklearn.cluster._bicluster (top-level)
missing module named scipy.sparse.sparray - imported by scipy.sparse (optional), sklearn.utils.fixes (optional)
missing module named scipy.sparse.coo_array - imported by scipy.sparse (top-level), scipy.io._fast_matrix_market (top-level), scipy.io._mmio (top-level)
missing module named scipy.sparse.vstack - imported by scipy.sparse (top-level), scipy.optimize._linprog_highs (top-level), scipy.optimize._milp (top-level)
missing module named scipy.sparse.bmat - imported by scipy.sparse (top-level), scipy.optimize._trustregion_constr.projections (top-level), scipy.optimize._trustregion_constr.qp_subproblem (top-level)
missing module named scipy.sparse.find - imported by scipy.sparse (top-level), scipy.optimize._numdiff (top-level), scipy.integrate._ivp.common (top-level)
missing module named scipy.sparse.csr_matrix - imported by scipy.sparse (top-level), scipy.optimize._numdiff (top-level), scipy.optimize._lsq.lsq_linear (top-level), sklearn.utils._param_validation (top-level), sklearn.metrics.pairwise (top-level), sklearn.neighbors._base (top-level), sklearn.manifold._locally_linear (top-level), sklearn.manifold._t_sne (top-level), sklearn.metrics._classification (top-level), sklearn.metrics._ranking (top-level)
missing module named scipy.sparse.csc_matrix - imported by scipy.sparse (top-level), scipy.integrate._bvp (top-level), scipy.integrate._ivp.bdf (top-level), scipy.optimize._numdiff (top-level), scipy.integrate._ivp.radau (top-level), scipy.linalg._sketches (top-level), scipy.optimize._trustregion_constr.projections (top-level), scipy.optimize._trustregion_constr.qp_subproblem (top-level), scipy.optimize._linprog_highs (top-level), scipy.io._harwell_boeing.hb (top-level), sklearn.cluster._spectral (top-level)
missing module named scipy.sparse.coo_matrix - imported by scipy.sparse (top-level), scipy.integrate._bvp (top-level), scipy.optimize._numdiff (top-level), scipy.integrate._ivp.common (top-level), scipy.stats._crosstab (top-level), pandas.core.arrays.sparse.accessor (delayed), scipy.io.matlab._mio (delayed, conditional), scipy.io._fast_matrix_market (top-level), scipy.io._mmio (top-level), sklearn.metrics._classification (top-level)
missing module named scipy.sparse.diags - imported by scipy.sparse (delayed), scipy.sparse.linalg._special_sparse_arrays (delayed)
missing module named scipy.sparse.spdiags - imported by scipy.sparse (delayed), scipy.sparse.linalg._special_sparse_arrays (delayed)
missing module named scipy.sparse.dia_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level)
missing module named scipy.sparse.kron - imported by scipy.sparse (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level)
missing module named scipy.sparse.eye - imported by scipy.sparse (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level), scipy.integrate._ivp.bdf (top-level), scipy.integrate._ivp.radau (top-level), scipy.optimize._trustregion_constr.equality_constrained_sqp (top-level), scipy.optimize._trustregion_constr.projections (top-level), sklearn.manifold._locally_linear (top-level)
missing module named scipy.sparse.diags_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level)
missing module named scipy.sparse.eye_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level)
missing module named scipy.sparse.csc_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.optimize._milp (top-level), scipy.io._harwell_boeing.hb (top-level)
missing module named scipy.sparse.csr_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.interpolate._bsplines (top-level), scipy.interpolate._ndbspline (top-level)
missing module named scipy.sparse.SparseEfficiencyWarning - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), sklearn.cluster._optics (top-level)
missing module named scipy.stats.iqr - imported by scipy.stats (delayed), scipy.stats._hypotests (delayed)
missing module named dummy_thread - imported by cffi.lock (conditional, optional)
missing module named thread - imported by cffi.lock (conditional, optional), cffi.cparser (conditional, optional)
missing module named cStringIO - imported by cffi.ffiplatform (optional)
missing module named cPickle - imported by pycparser.ply.yacc (delayed, optional)
missing module named cffi._pycparser - imported by cffi (optional), cffi.cparser (optional)
missing module named scipy._distributor_init_local - imported by scipy (optional), scipy._distributor_init (optional)
missing module named numexpr - imported by pandas.core.computation.expressions (conditional), pandas.core.computation.engines (delayed)
missing module named pandas.core.groupby.PanelGroupBy - imported by pandas.core.groupby (delayed, optional), tqdm.std (delayed, optional)
missing module named numba - imported by pandas.core._numba.executor (delayed, conditional), pandas.core.util.numba_ (delayed, conditional), pandas.core.groupby.numba_ (delayed, conditional), pandas.core.window.numba_ (delayed, conditional), pandas.core.window.online (delayed, conditional), pandas.core._numba.kernels.mean_ (top-level), pandas.core._numba.kernels.shared (top-level), pandas.core._numba.kernels.sum_ (top-level), pandas.core._numba.kernels.min_max_ (top-level), pandas.core._numba.kernels.var_ (top-level), pandas.core._numba.extensions (top-level)
missing module named 'numba.extending' - imported by pandas.core._numba.kernels.sum_ (top-level)
missing module named pandas.core.window._Rolling_and_Expanding - imported by pandas.core.window (delayed, optional), tqdm.std (delayed, optional)
missing module named 'numba.typed' - imported by pandas.core._numba.extensions (delayed)
missing module named 'numba.core' - imported by pandas.core._numba.extensions (top-level)
missing module named traitlets - imported by pandas.io.formats.printing (delayed, conditional), plotly.basewidget (top-level), pydeck.widget.widget (top-level), altair.jupyter.jupyter_chart (top-level)
missing module named 'IPython.core' - imported by sympy.interactive.printing (delayed, optional), pandas.io.formats.printing (delayed, conditional), h5py (delayed, conditional, optional), h5py.ipy_completer (top-level), rich.pretty (delayed, optional), altair.utils.core (delayed, conditional), altair._magics (top-level)
missing module named IPython - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional, optional), pandas.io.formats.printing (delayed), h5py (delayed, conditional, optional), h5py.ipy_completer (top-level), keras.src.utils.model_visualization (delayed, conditional, optional), keras.src.saving.file_editor (delayed, optional), tensorflow.python.keras.utils.vis_utils (delayed, conditional, optional)
missing module named botocore - imported by pandas.io.common (delayed, conditional, optional)
missing module named 'lxml.etree' - imported by openpyxl.xml (delayed, optional), openpyxl.xml.functions (conditional), pandas.io.xml (delayed), pandas.io.formats.xml (delayed), networkx.readwrite.graphml (delayed, optional), pandas.io.html (delayed), imageio.plugins._tifffile (delayed, optional)
missing module named openpyxl.tests - imported by openpyxl.reader.excel (optional)
missing module named 'odf.config' - imported by pandas.io.excel._odswriter (delayed)
missing module named 'odf.style' - imported by pandas.io.excel._odswriter (delayed)
missing module named 'odf.text' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
missing module named 'odf.table' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
missing module named 'odf.opendocument' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
missing module named xlrd - imported by pandas.io.excel._xlrd (delayed, conditional), pandas.io.excel._base (delayed, conditional)
missing module named pyxlsb - imported by pandas.io.excel._pyxlsb (delayed, conditional)
missing module named 'odf.office' - imported by pandas.io.excel._odfreader (delayed)
missing module named 'odf.element' - imported by pandas.io.excel._odfreader (delayed)
missing module named 'odf.namespaces' - imported by pandas.io.excel._odfreader (delayed)
missing module named odf - imported by pandas.io.excel._odfreader (conditional)
missing module named python_calamine - imported by pandas.io.excel._calamine (delayed, conditional)
missing module named collections.Mapping - imported by collections (optional), pytz.lazy (optional)
missing module named UserDict - imported by pytz.lazy (optional)
missing module named Foundation - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named AppKit - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named PyQt4 - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named qtpy - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named 'sqlalchemy.engine' - imported by pandas.io.sql (delayed), streamlit.connections.sql_connection (conditional)
missing module named 'sqlalchemy.types' - imported by pandas.io.sql (delayed, conditional)
missing module named 'sqlalchemy.schema' - imported by pandas.io.sql (delayed)
missing module named 'sqlalchemy.sql' - imported by pandas.io.sql (conditional)
missing module named sqlalchemy - imported by pandas.io.sql (delayed, conditional), streamlit.connections.sql_connection (delayed)
missing module named pandas.core.internals.Block - imported by pandas.core.internals (conditional), pandas.io.pytables (conditional)
missing module named tables - imported by pandas.io.pytables (delayed, conditional)
missing module named lxml - imported by sympy.utilities.mathml (delayed), pandas.io.xml (conditional), tifffile.tifffile (delayed, optional)
missing module named 'google.auth' - imported by pandas.io.gbq (conditional)
missing module named 'lxml.html' - imported by pandas.io.html (delayed)
missing module named bs4 - imported by pandas.io.html (delayed)
missing module named pandas.Panel - imported by pandas (delayed, optional), tqdm.std (delayed, optional)
missing module named 'pandas.api.internals' - imported by pyarrow.pandas_compat (delayed, conditional)
missing module named 'pyarrow._cuda' - imported by pyarrow.cuda (top-level)
missing module named 'pyarrow.gandiva' - imported by pyarrow.conftest (optional)
missing module named 'pyarrow._azurefs' - imported by pyarrow.fs (optional)
missing module named 'setuptools_scm.git' - imported by pyarrow (delayed, optional)
missing module named setuptools_scm - imported by matplotlib (delayed, conditional, optional), pyarrow (optional), tqdm.version (optional)
missing module named fastparquet - imported by fsspec.parquet (delayed), pyarrow.conftest (optional)
missing module named requests_kerberos - imported by fsspec.implementations.webhdfs (delayed, conditional)
missing module named smbprotocol - imported by fsspec.implementations.smb (top-level)
missing module named smbclient - imported by fsspec.implementations.smb (top-level)
missing module named paramiko - imported by fsspec.implementations.sftp (top-level)
missing module named kerchunk - imported by fsspec.implementations.reference (delayed)
missing module named ujson - imported by fsspec.implementations.cache_metadata (optional), fsspec.implementations.reference (optional)
missing module named 'libarchive.ffi' - imported by fsspec.implementations.libarchive (top-level)
missing module named libarchive - imported by fsspec.implementations.libarchive (top-level)
missing module named uvloop - imported by aiohttp.worker (delayed)
missing module named annotationlib - imported by attr._compat (conditional)
missing module named async_timeout - imported by aiohttp.helpers (conditional), aiohttp.web_ws (conditional), aiohttp.client_ws (conditional)
missing module named 'gunicorn.workers' - imported by aiohttp.worker (top-level)
missing module named gunicorn - imported by aiohttp.worker (top-level)
missing module named aiodns - imported by aiohttp.resolver (optional)
missing module named pygit2 - imported by fsspec.implementations.git (top-level)
missing module named 'distributed.worker' - imported by fsspec.implementations.dask (top-level)
missing module named 'distributed.client' - imported by fsspec.implementations.dask (top-level)
missing module named dask - imported by joblib._dask (optional), sklearn.externals.array_api_extra._lib._lazy (delayed, conditional), narwhals._polars.dataframe (delayed, conditional), narwhals._pandas_like.dataframe (delayed, conditional), narwhals._arrow.dataframe (delayed, conditional), fsspec.implementations.dask (top-level), skimage.restoration._cycle_spin (optional)
missing module named panel - imported by fsspec.gui (top-level)
missing module named fuse - imported by fsspec.fuse (top-level)
missing module named lz4 - imported by fsspec.compression (optional), joblib.compressor (optional)
missing module named snappy - imported by fsspec.compression (delayed, optional)
missing module named lzmaffi - imported by fsspec.compression (optional)
missing module named isal - imported by fsspec.compression (optional)
missing module named 'IPython.display' - imported by tqdm.notebook (conditional, optional), rich.jupyter (delayed, optional), rich.live (delayed, conditional, optional), huggingface_hub._login (delayed, optional), pydeck.io.html (delayed), altair.vegalite.v5.display (delayed), altair.vegalite.v5.api (delayed, conditional)
missing module named 'IPython.html' - imported by tqdm.notebook (conditional, optional)
missing module named ipywidgets - imported by tqdm.notebook (conditional, optional), rich.live (delayed, conditional, optional), plotly.graph_objects (delayed, conditional, optional), plotly.graph_objs (delayed, conditional, optional), pydeck.widget.widget (top-level)
missing module named boto3 - imported by tensorboard.compat.tensorflow_stub.io.gfile (optional)
missing module named 'botocore.exceptions' - imported by tensorboard.compat.tensorflow_stub.io.gfile (optional)
missing module named tensorboard.compat.notf - imported by tensorboard.compat (delayed, optional)
missing module named 'tensorflow.compat' - imported by keras.src.callbacks.tensorboard (delayed), tensorboard.util.op_evaluator (delayed), tensorboard.util.encoder (delayed), tensorboard.plugins.audio.summary (delayed), tensorboard.plugins.custom_scalar.summary (delayed), tensorboard.plugins.histogram.summary (delayed), tensorboard.plugins.image.summary (delayed), tensorboard.plugins.pr_curve.summary (delayed), tensorboard.plugins.scalar.summary (delayed), tensorboard.plugins.text.summary (delayed)
missing module named 'keras.optimizers.optimizer_v2' - imported by tensorflow.python.saved_model.load (delayed, conditional, optional)
missing module named triton - imported by torch._utils_internal (delayed, conditional), torch._dynamo.logging (conditional, optional), torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed), torch._inductor.runtime.autotune_cache (conditional), torch._inductor.runtime.coordinate_descent_tuner (optional), torch._inductor.runtime.triton_heuristics (conditional, optional), torch._inductor.codegen.wrapper (delayed, conditional), torch._inductor.kernel.mm_common (delayed), torch._inductor.kernel.mm_plus_mm (delayed), torch.sparse._triton_ops_meta (delayed, conditional), torch.sparse._triton_ops (conditional), torch._dynamo.utils (conditional), torch._inductor.compile_worker.__main__ (optional), torch._inductor.runtime.triton_helpers (top-level), torch.testing._internal.triton_utils (conditional)
missing module named 'torch._C._distributed_c10d' - imported by torch.distributed (conditional), torch.distributed.distributed_c10d (top-level), torch.distributed.constants (top-level), torch.distributed.rpc (conditional), torch.distributed.tensor._collective_utils (top-level), torch.distributed._shard.sharded_tensor.reshard (top-level), torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding_bag (top-level), torch.testing._internal.distributed.fake_pg (top-level), torch._dynamo.variables.distributed (delayed), torch.distributed._symmetric_memory (top-level), torch.distributed.elastic.control_plane (delayed), torch.testing._internal.distributed.multi_threaded_pg (top-level)
missing module named torch.randperm - imported by torch (top-level), torch.utils.data.dataset (top-level)
missing module named torch.Generator - imported by torch (top-level), torch.utils.data.dataset (top-level)
missing module named torch.default_generator - imported by torch (top-level), torch.utils.data.dataset (top-level)
missing module named soundfile - imported by torchaudio._backend.soundfile_backend (conditional, optional)
missing module named torch.norm_except_dim - imported by torch (top-level), torch.nn.utils.weight_norm (top-level)
missing module named torch._weight_norm - imported by torch (top-level), torch.nn.utils.weight_norm (top-level)
missing module named 'triton.language' - imported by torch._inductor.codegen.triton_split_scan (delayed), torch._inductor.codegen.wrapper (delayed), torch.sparse._triton_ops (conditional), torch._inductor.runtime.triton_helpers (top-level), torch.testing._internal.triton_utils (conditional)
missing module named 'triton.runtime' - imported by torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed), torch._inductor.runtime.triton_heuristics (conditional), torch._library.triton (delayed), torch._inductor.select_algorithm (delayed, optional), torch._inductor.ir (delayed), torch._dynamo.variables.builder (delayed, conditional), torch._inductor.fx_passes.reinplace (delayed, conditional), torch._inductor.utils (delayed)
missing module named 'triton.compiler' - imported by torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed, optional), torch._inductor.runtime.hints (optional), torch._inductor.runtime.triton_heuristics (conditional, optional), torch._inductor.scheduler (delayed), torch._inductor.codegen.triton (delayed), torch._inductor.codecache (delayed, optional), torch._inductor.async_compile (delayed, optional)
missing module named dl - imported by setuptools.command.build_ext (conditional, optional)
missing module named 'Cython.Distutils' - imported by setuptools.command.build_ext (conditional, optional)
missing module named 'win32com.shell' - imported by torch._appdirs (conditional, optional)
missing module named 'com.sun' - imported by torch._appdirs (delayed, conditional, optional)
missing module named com - imported by torch._appdirs (delayed)
missing module named win32api - imported by torch._appdirs (delayed, conditional, optional)
missing module named win32com - imported by torch._appdirs (delayed)
missing module named halide - imported by torch._inductor.codecache (delayed, conditional), torch._inductor.runtime.halide_helpers (optional)
missing module named gmpy2.qdiv - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.lcm - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.gcd - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.gcdext - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.denom - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.numer - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.mpq - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.mpz - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named 'pyglet.image' - imported by sympy.printing.preview (delayed, optional)
missing module named 'pyglet.window' - imported by sympy.plotting.pygletplot.managed_window (top-level), sympy.plotting.pygletplot.plot_controller (top-level), sympy.printing.preview (delayed, optional)
missing module named pyglet - imported by sympy.plotting.pygletplot.plot (optional), sympy.plotting.pygletplot.plot_axes (top-level), sympy.printing.preview (delayed, conditional, optional), sympy.testing.runtests (delayed, conditional)
missing module named 'pyglet.gl' - imported by sympy.plotting.pygletplot.plot_axes (top-level), sympy.plotting.pygletplot.util (top-level), sympy.plotting.pygletplot.plot_window (top-level), sympy.plotting.pygletplot.plot_camera (top-level), sympy.plotting.pygletplot.plot_rotation (top-level), sympy.plotting.pygletplot.plot_curve (top-level), sympy.plotting.pygletplot.plot_mode_base (top-level), sympy.plotting.pygletplot.plot_surface (top-level)
missing module named 'pyglet.clock' - imported by sympy.plotting.pygletplot.managed_window (top-level)
missing module named 'sage.libs' - imported by mpmath.libmp.backend (conditional, optional), mpmath.libmp.libelefun (conditional, optional), mpmath.libmp.libmpf (conditional, optional), mpmath.libmp.libmpc (conditional, optional), mpmath.libmp.libhyper (delayed, conditional), mpmath.ctx_mp (conditional)
missing module named sage - imported by mpmath.libmp.backend (conditional, optional)
missing module named gmpy - imported by mpmath.libmp.backend (conditional, optional)
missing module named pysat - imported by sympy.logic.algorithms.minisat22_wrapper (delayed)
missing module named pycosat - imported by sympy.logic.algorithms.pycosat_wrapper (delayed)
missing module named flint - imported by sympy.external.gmpy (delayed, optional), sympy.polys.polyutils (conditional), sympy.polys.factortools (conditional), sympy.polys.polyclasses (conditional), sympy.polys.domains.groundtypes (conditional), sympy.polys.domains.finitefield (conditional)
missing module named all - imported by sympy.testing.runtests (delayed, optional)
missing module named 'IPython.Shell' - imported by sympy.interactive.session (delayed, conditional)
missing module named 'IPython.frontend' - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional)
missing module named 'IPython.terminal' - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional)
missing module named 'IPython.iplib' - imported by sympy.interactive.printing (delayed, optional)
missing module named py - imported by mpmath.tests.runtests (delayed, conditional)
missing module named 'sage.all' - imported by sympy.core.function (delayed)
missing module named 'sage.interfaces' - imported by sympy.core.basic (delayed)
missing module named 'cutlass_library.gemm_operation' - imported by torch._inductor.codegen.cuda.gemm_template (delayed), torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions (conditional)
missing module named 'cutlass_library.library' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional), torch._inductor.codegen.cuda.gemm_template (delayed), torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions (conditional)
missing module named 'cutlass_library.generator' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed)
missing module named 'cutlass_library.manifest' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional)
missing module named cutlass_library - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional)
missing module named torch.multiprocessing._prctl_pr_set_pdeathsig - imported by torch.multiprocessing (top-level), torch.multiprocessing.spawn (top-level)
missing module named 'torch.utils._config_typing' - imported by torch._dynamo.config (conditional), torch._inductor.config (conditional), torch._functorch.config (conditional)
missing module named 'torch._C._functorch' - imported by torch._subclasses.fake_tensor (top-level), torch._subclasses.meta_utils (top-level), torch._functorch.pyfunctorch (top-level), torch._higher_order_ops.cond (top-level), torch._functorch.autograd_function (top-level), torch._functorch.utils (top-level), torch._functorch.vmap (top-level), torch._functorch.eager_transforms (top-level)
missing module named torch.trunc - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.tanh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.tan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.square - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sqrt - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sinh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sin - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.signbit - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sign - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.round - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.reciprocal - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.rad2deg - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.negative - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.logical_not - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log2 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log1p - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log10 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.isnan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.isinf - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.isfinite - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.floor - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.expm1 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.exp2 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.exp - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.deg2rad - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.cosh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.cos - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.conj_physical - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.ceil - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_not - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arctanh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arctan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arcsinh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arcsin - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arccosh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arccos - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.absolute - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.true_divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.subtract - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.remainder - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.pow - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.not_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.nextafter - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.multiply - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.minimum - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.maximum - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.logical_xor - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.logical_or - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.logical_and - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.logaddexp2 - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.logaddexp - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.less_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.less - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.ldexp - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.lcm - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.hypot - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.heaviside - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.greater_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.greater - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.gcd - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.fmod - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.fmin - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.fmax - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.floor_divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.float_power - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.eq - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.copysign - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_xor - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_right_shift - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_or - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_left_shift - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_and - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.arctan2 - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.add - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch_xla - imported by torch._functorch.fx_minifier (delayed), huggingface_hub.serialization._torch (delayed, conditional)
|
||||
missing module named deeplearning - imported by torch._inductor.fx_passes.group_batch_fusion (optional)
|
||||
missing module named torch._inductor.fx_passes.fb - imported by torch._inductor.fx_passes (delayed, conditional), torch._inductor.fx_passes.pre_grad (delayed, conditional)
|
||||
missing module named 'torch_xla.distributed' - imported by torch.distributed.tensor._api (delayed, conditional, optional)
|
||||
missing module named torchdistx - imported by torch.distributed.fsdp._init_utils (optional)
|
||||
missing module named 'torch._C._distributed_rpc' - imported by torch.distributed.rpc (conditional), torch.distributed.rpc.api (top-level), torch.distributed.rpc.constants (top-level), torch.distributed.rpc.internal (top-level), torch.distributed.rpc.options (top-level), torch._jit_internal (conditional)
|
||||
missing module named foo - imported by torch._functorch.compilers (delayed)
|
||||
missing module named torch.broadcast_shapes - imported by torch (top-level), torch._numpy._funcs_impl (top-level)
|
||||
missing module named torch._numpy.float_ - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.max - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.isnan - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.signbit - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.real - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.isscalar - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.iscomplexobj - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.imag - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.intp - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
|
||||
missing module named torch._numpy.empty - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
|
||||
missing module named torch._numpy.arange - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
|
||||
missing module named 'onnxscript._framework_apis' - imported by torch.onnx._internal._exporter_legacy (delayed), torch.onnx._internal._lazy_import (conditional)
|
||||
missing module named onnxscript - imported by torch.onnx._internal.fx.registration (conditional), torch.onnx._internal._exporter_legacy (delayed, conditional, optional), torch.onnx._internal.fx.diagnostics (top-level), torch.onnx._internal.fx.onnxfunction_dispatcher (conditional), torch.onnx._internal.fx.fx_onnx_interpreter (top-level), torch.onnx._internal.onnxruntime (delayed, conditional, optional), torch.onnx._internal._lazy_import (conditional), torch.onnx._internal.exporter._core (top-level), torch.onnx._internal.exporter._dispatching (top-level), torch.onnx._internal.exporter._schemas (top-level), torch.onnx._internal.exporter._registration (conditional), torch.onnx._internal.exporter._building (top-level), torch.onnx._internal.exporter._tensors (top-level), torch.onnx._internal.exporter._ir_passes (top-level), torch.onnx._internal.exporter._reporting (conditional)
|
||||
missing module named 'onnx.onnx_cpp2py_export.defs' - imported by onnx.defs (top-level), onnx.reference.ops._op_list (top-level)
|
||||
missing module named 'onnx.onnx_cpp2py_export.version_converter' - imported by onnx.version_converter (top-level)
|
||||
missing module named 'onnx.onnx_cpp2py_export.shape_inference' - imported by onnx.shape_inference (top-level)
|
||||
missing module named 'onnx.onnx_cpp2py_export.printer' - imported by onnx.printer (top-level)
|
||||
missing module named 'onnx.onnx_cpp2py_export.parser' - imported by onnx.parser (top-level)
|
||||
missing module named 'onnx.onnx_cpp2py_export.checker' - imported by onnx.checker (top-level)
|
||||
missing module named pyinstrument - imported by torch.onnx._internal.exporter._core (delayed, conditional)
|
||||
missing module named 'onnxscript.ir' - imported by torch.onnx._internal.exporter._core (top-level), torch.onnx._internal.exporter._building (top-level)
|
||||
missing module named 'onnxscript.evaluator' - imported by torch.onnx._internal.exporter._core (top-level)
|
||||
missing module named onnxruntime.capi.build_and_package_info - imported by onnxruntime.capi.onnxruntime_validation (delayed, conditional, optional)
|
||||
missing module named 'onnxruntime.training' - imported by onnxruntime.capi.onnxruntime_validation (delayed, optional)
|
||||
missing module named 'onnxscript.function_libs' - imported by torch.onnx._internal.fx.diagnostics (top-level), torch.onnx._internal.fx.onnxfunction_dispatcher (conditional), torch.onnx._internal.fx.decomposition_skip (top-level), torch.onnx._internal.fx.fx_onnx_interpreter (top-level), torch.onnx._internal.exporter._ir_passes (delayed, optional)
|
||||
missing module named 'onnx.defs.OpSchema' - imported by torch.onnx._internal.fx.type_utils (conditional)
|
||||
missing module named transformers - imported by torch.onnx._internal.fx.patcher (delayed, conditional, optional), torch.onnx._internal.fx.dynamo_graph_extractor (delayed, optional), nncf.data.generators (delayed, optional), torch._dynamo.variables.dicts (delayed), torch.testing._internal.common_distributed (delayed, optional)
|
||||
missing module named accimage - imported by torchvision.transforms.transforms (optional), torchvision.transforms.functional (optional), torchvision.transforms._functional_pil (optional), torchvision.datasets.folder (delayed)
|
||||
missing module named torch.ao.quantization.QuantStub - imported by torch.ao.quantization (top-level), torchvision.models.quantization.mobilenetv2 (top-level), torchvision.models.quantization.mobilenetv3 (top-level), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named torch.ao.quantization.DeQuantStub - imported by torch.ao.quantization (top-level), torchvision.models.quantization.mobilenetv2 (top-level), torchvision.models.quantization.mobilenetv3 (top-level), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named 'monkeytype.tracing' - imported by torch.jit._monkeytype_config (optional)
|
||||
missing module named 'monkeytype.db' - imported by torch.jit._monkeytype_config (optional)
|
||||
missing module named 'monkeytype.config' - imported by torch.jit._monkeytype_config (optional)
|
||||
missing module named monkeytype - imported by torch.jit._monkeytype_config (optional)
|
||||
missing module named 'torch._C._jit_tree_views' - imported by torch._sources (top-level), torch.jit.frontend (top-level)
|
||||
missing module named wcwidth - imported by tabulate (optional)
|
||||
missing module named torch.ao.quantization.QConfig - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.qconfig_mapping_utils (top-level), torch.ao.quantization.fx.lstm_utils (top-level), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named torch.ao.quantization.QConfigMapping - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.custom_config (top-level), torch.ao.ns.fx.n_shadows_utils (top-level), torch.ao.ns.fx.qconfig_multi_mapping (top-level), torch.ao.ns._numeric_suite_fx (top-level), torch.ao.quantization.fx.lstm_utils (top-level), torch.ao.quantization.pt2e.prepare (top-level), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named torch.ao.quantization.QuantType - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.utils (top-level), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named torch.ao.quantization.QConfigAny - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.utils (top-level)
|
||||
missing module named torch.ao.quantization.float_qparams_weight_only_qconfig - imported by torch.ao.quantization (delayed, conditional), torch.ao.nn.quantized.modules.embedding_ops (delayed, conditional), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named pycocotools - imported by torchvision.datasets.coco (delayed), torchvision.tv_tensors._dataset_wrapper (delayed)
|
||||
missing module named gdown - imported by torchvision.datasets.utils (delayed, optional)
|
||||
missing module named 'IPython.utils' - imported by h5py.ipy_completer (top-level)
|
||||
missing module named mpi4py - imported by h5py._hl.files (delayed)
|
||||
missing module named lmdb - imported by torchvision.datasets.lsun (delayed)
|
||||
missing module named 'onnxscript.rewriter' - imported by torch.onnx._internal.onnxruntime (delayed, conditional, optional)
|
||||
missing module named 'torch._C._onnx' - imported by torch.onnx (top-level), torch.onnx.utils (top-level), torch.onnx.symbolic_helper (top-level), torch.onnx._globals (top-level), torch.onnx.symbolic_opset9 (top-level), torch.onnx.symbolic_opset10 (top-level), torch.onnx.symbolic_opset13 (top-level), torch.onnx._experimental (top-level), torch.onnx.verification (top-level)
|
||||
missing module named torchrec - imported by torch._dynamo.variables.user_defined (delayed)
|
||||
missing module named 'torch._C._lazy_ts_backend' - imported by torch._lazy.ts_backend (top-level), torch._lazy.computation (top-level)
|
||||
missing module named 'torch._C._lazy' - imported by torch._lazy (top-level), torch._lazy.device_context (top-level), torch._lazy.metrics (top-level), torch._lazy.computation (top-level), torch._lazy.config (top-level), torch._lazy.debug (top-level), torch._lazy.ir_cache (top-level)
|
||||
missing module named hypothesis - imported by torch.testing._internal.common_utils (optional), torch.testing._internal.hypothesis_utils (top-level)
|
||||
missing module named 'numba.cuda' - imported by torch.testing._internal.common_cuda (conditional, optional)
|
||||
missing module named 'xmlrunner.result' - imported by torch.testing._internal.common_utils (delayed, conditional)
|
||||
missing module named xmlrunner - imported by torch.testing._internal.common_utils (delayed, conditional)
|
||||
missing module named expecttest - imported by torch.testing._internal.common_utils (top-level)
|
||||
missing module named '_pytest.recwarn' - imported by torch._dynamo.variables.user_defined (delayed, optional)
|
||||
missing module named _pytest - imported by torch._dynamo.variables.user_defined (delayed, optional)
|
||||
missing module named 'torch._C._dynamo' - imported by torch._guards (top-level), torch._dynamo.convert_frame (top-level), torch._dynamo.guards (top-level), torch._dynamo.eval_frame (top-level), torch._dynamo.decorators (conditional), torch._dynamo.types (top-level)
|
||||
missing module named pygraphviz - imported by networkx.drawing.nx_agraph (delayed, optional)
|
||||
missing module named 'triton.backends' - imported by torch._inductor.runtime.triton_heuristics (conditional, optional)
|
||||
missing module named 'triton.testing' - imported by torch._inductor.runtime.benchmarking (delayed, optional), torch._inductor.utils (delayed)
|
||||
missing module named 'torch_xla.core' - imported by huggingface_hub.serialization._torch (delayed, conditional, optional), torch._dynamo.testing (delayed, conditional), torch._dynamo.backends.torchxla (delayed, optional)
|
||||
missing module named torch.float16 - imported by torch (delayed, conditional), torch._inductor.codegen.cpp_wrapper_cuda (delayed, conditional)
|
||||
missing module named torch.bfloat16 - imported by torch (delayed, conditional), torch._inductor.codegen.cpp_wrapper_cuda (delayed, conditional)
|
||||
missing module named torch.ScriptObject - imported by torch (delayed), torch.export.graph_signature (delayed)
|
||||
missing module named moviepy - imported by torch.utils.tensorboard.summary (delayed, optional)
|
||||
missing module named 'torch._C._monitor' - imported by torch.monitor (top-level)
|
||||
missing module named 'libfb.py' - imported by torch._dynamo.debug_utils (conditional), torch._inductor.codecache (delayed, conditional), torch._inductor.compile_worker.subproc_pool (delayed, conditional)
|
||||
missing module named 'torch._inductor.fb' - imported by torch._inductor.runtime.autotune_cache (delayed, conditional, optional), torch._inductor.cpp_builder (conditional), torch._inductor.graph (conditional), torch._inductor.codecache (delayed, conditional, optional), torch._inductor.compile_fx (delayed, conditional, optional)
|
||||
missing module named 'triton.fb' - imported by torch._inductor.cpp_builder (conditional), torch._inductor.codecache (conditional)
|
||||
missing module named rfe - imported by torch._inductor.remote_cache (conditional)
|
||||
missing module named redis - imported by torch._inductor.remote_cache (optional)
|
||||
missing module named 'ck4inductor.universal_gemm' - imported by torch._inductor.utils (delayed, optional)
|
||||
missing module named ck4inductor - imported by torch._inductor.utils (delayed, optional)
|
||||
missing module named libfb - imported by torch._inductor.config (conditional, optional)
|
||||
missing module named amdsmi - imported by torch.cuda (conditional, optional), torch.cuda.memory (delayed, conditional, optional)
|
||||
missing module named pynvml - imported by torch.cuda (delayed, conditional, optional), torch.cuda.memory (delayed, conditional, optional)
|
||||
missing module named torch.device - imported by torch (top-level), torch.types (top-level), torch.nn.modules.module (top-level), torch.cuda (top-level), torch._library.infer_schema (top-level), torch._inductor.graph (top-level), torch.distributed.nn.api.remote_module (top-level), torch.xpu (top-level), torch.cpu (top-level), torch.mtia (top-level)
|
||||
missing module named 'torch._C._profiler' - imported by torch.utils._traceback (delayed), torch.profiler (top-level), torch.autograd.profiler (top-level), torch.profiler.profiler (top-level), torch.profiler._memory_profiler (top-level), torch.cuda._memory_viz (delayed), torch.testing._internal.logging_tensor (top-level), torch.autograd (top-level), torch.profiler._pattern_matcher (top-level)
|
||||
missing module named 'torch._C._autograd' - imported by torch._subclasses.meta_utils (top-level), torch.profiler (top-level), torch.profiler._memory_profiler (top-level), torch.autograd (top-level)
|
||||
missing module named z3 - imported by torch.fx.experimental.validator (optional), torch.fx.experimental.migrate_gradual_types.transform_to_z3 (optional), torch.fx.experimental.migrate_gradual_types.z3_types (optional)
|
||||
missing module named torch.Size - imported by torch (top-level), torch.types (top-level), torch.nn.modules.normalization (top-level)
|
||||
missing module named torch.nn.Sequential - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ParameterList - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ParameterDict - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ModuleList - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ModuleDict - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ReLU - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Linear - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Conv3d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Conv2d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Conv1d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.BatchNorm3d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.BatchNorm2d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.BatchNorm1d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Module - imported by torch.nn (top-level), torch.optim.swa_utils (top-level), torch.ao.quantization.fake_quantize (top-level), torch.jit._recursive (top-level), torch.jit._script (top-level), torch.jit._trace (top-level), torch._dynamo.mutation_guard (top-level), torch.fx.passes.utils.common (top-level), torch.distributed.nn.api.remote_module (top-level), torchaudio.models.wav2vec2.utils.import_fairseq (top-level), torchaudio.models.wav2vec2.model (top-level), torchaudio.models.wav2vec2.components (top-level), torchaudio.models.wav2vec2.utils.import_huggingface (top-level), torchaudio.pipelines._wav2vec2.impl (top-level), nncf.torch.utils (top-level), nncf.torch.debug (top-level), nncf.common.factory (delayed, conditional), nncf.torch.model_creation (top-level), torch.fx.experimental.proxy_tensor (top-level)
|
||||
missing module named torch.qscheme - imported by torch (top-level), torch.types (top-level)
|
||||
missing module named torch.layout - imported by torch (top-level), torch.types (top-level)
|
||||
missing module named torch.DispatchKey - imported by torch (top-level), torch.types (top-level)
|
||||
missing module named torchaudio._internal.fb - imported by torchaudio._internal (optional)
|
||||
missing module named sentencepiece - imported by torchaudio.pipelines.rnnt_pipeline (delayed)
|
||||
missing module named dp - imported by torchaudio.pipelines._tts.utils (delayed)
|
||||
missing module named kaldi_io - imported by torchaudio.kaldi_io (delayed)
|
||||
missing module named av.video._VideoCodecName - imported by av.video (top-level), av.codec.context (top-level), av.container.output (top-level)
|
||||
missing module named av.audio._AudioCodecName - imported by av.audio (top-level), av.codec.context (top-level), av.container.output (top-level)
|
||||
missing module named torcharrow - imported by torch.utils.data.datapipes.iter.callable (delayed, conditional, optional)
|
||||
missing module named _dbm - imported by dbm.ndbm (top-level)
|
||||
missing module named _gdbm - imported by dbm.gnu (top-level)
|
||||
missing module named diff - imported by dill._dill (delayed, conditional, optional)
|
||||
missing module named dill.diff - imported by dill (delayed, conditional, optional), dill._dill (delayed, conditional, optional)
|
||||
missing module named version - imported by dill (optional)
|
||||
missing module named 'jax.typing' - imported by optree.integrations.jax (top-level)
|
||||
missing module named 'jax._src' - imported by optree.integrations.jax (top-level), keras.src.backend.jax.nn (delayed, optional)
|
||||
missing module named 'torch._C._distributed_autograd' - imported by torch.distributed.autograd (conditional)
|
||||
missing module named 'einops._torch_specific' - imported by torch._dynamo.decorators (delayed, optional)
|
||||
missing module named einops - imported by torch._dynamo.decorators (delayed)
|
||||
missing module named 'tensorflow.saved_model' - imported by keras.src.export.saved_model (delayed)
|
||||
missing module named keras.src.backend.random_seed_dtype - imported by keras.src.backend (delayed), keras.src.random.seed_generator (delayed)
|
||||
missing module named keras.src.backend.convert_to_tensor - imported by keras.src.backend (delayed), keras.src.random.seed_generator (delayed)
|
||||
missing module named 'openvino._pyopenvino.util' - imported by openvino.utils (delayed), openvino.runtime.utils (top-level)
|
||||
missing module named 'openvino._pyopenvino.op' - imported by openvino.runtime.op (top-level), openvino.runtime.op.util (top-level), nncf.openvino.optimized_functions.models (top-level)
|
||||
missing module named 'jax.nn' - imported by keras.src.backend.jax.nn (delayed, optional)
|
||||
missing module named 'jax.scipy' - imported by keras.src.backend.jax.linalg (top-level)
|
||||
missing module named 'tensorflow.experimental' - imported by keras.src.backend.tensorflow.distribution_lib (top-level)
|
||||
missing module named 'tensorflow.summary' - imported by keras.src.callbacks.tensorboard (delayed, conditional)
|
||||
missing module named pygments.lexers.PrologLexer - imported by pygments.lexers (top-level), pygments.lexers.cplint (top-level)
|
||||
missing module named ctags - imported by pygments.formatters.html (optional)
|
||||
missing module named linkify_it - imported by markdown_it.main (optional)
|
||||
missing module named pydantic - imported by huggingface_hub.utils._runtime (delayed, optional), huggingface_hub._webhooks_payload (conditional)
|
||||
missing module named 'google.colab' - imported by huggingface_hub.utils._auth (delayed, optional), plotly.io._renderers (conditional, optional)
|
||||
missing module named hf_transfer - imported by huggingface_hub.file_download (delayed, conditional, optional), huggingface_hub.lfs (delayed, optional)
|
||||
missing module named hf_xet - imported by huggingface_hub.file_download (delayed, optional), huggingface_hub._commit_api (delayed)
|
||||
missing module named 'mcp.client' - imported by huggingface_hub.inference._mcp.mcp_client (delayed, conditional)
|
||||
missing module named mcp - imported by huggingface_hub.inference._mcp.utils (conditional), huggingface_hub.inference._mcp.mcp_client (delayed, conditional)
|
||||
missing module named fastai - imported by huggingface_hub.fastai_utils (delayed)
|
||||
missing module named 'fastapi.responses' - imported by huggingface_hub._oauth (delayed, optional), huggingface_hub._webhooks_server (conditional)
|
||||
missing module named fastapi - imported by huggingface_hub._oauth (delayed, conditional, optional), huggingface_hub._webhooks_server (conditional)
|
||||
missing module named gradio - imported by huggingface_hub._webhooks_server (delayed, conditional)
|
||||
missing module named tensorboardX - imported by huggingface_hub._tensorboard_logger (conditional, optional)
|
||||
missing module named 'starlette.datastructures' - imported by huggingface_hub._oauth (delayed, optional)
|
||||
missing module named 'authlib.integrations' - imported by huggingface_hub._oauth (delayed, optional)
|
||||
missing module named authlib - imported by huggingface_hub._oauth (delayed, optional), streamlit.auth_util (delayed, optional)
|
||||
missing module named starlette - imported by huggingface_hub._oauth (delayed, optional)
|
||||
missing module named 'ipywidgets.widgets' - imported by huggingface_hub._login (delayed, optional)
|
||||
missing module named 'InquirerPy.separator' - imported by huggingface_hub.commands.delete_cache (optional)
|
||||
missing module named 'InquirerPy.base' - imported by huggingface_hub.commands.delete_cache (optional)
|
||||
missing module named InquirerPy - imported by huggingface_hub.commands.delete_cache (optional)
|
||||
missing module named pydotplus - imported by keras.src.utils.model_visualization (optional), tensorflow.python.keras.utils.vis_utils (optional)
|
||||
missing module named pydot_ng - imported by keras.src.utils.model_visualization (optional), tensorflow.python.keras.utils.vis_utils (optional)
|
||||
missing module named keras.src.ops.convert_to_tensor - imported by keras.src.ops (top-level), keras.src.utils.torch_utils (top-level)
|
||||
missing module named keras.src.ops.convert_to_numpy - imported by keras.src.ops (top-level), keras.src.utils.torch_utils (top-level)
|
||||
missing module named keras.src.backend.random - imported by keras.src.backend (top-level), keras.src.ops (top-level), keras.src.testing.test_case (delayed), keras.src.initializers.random_initializers (top-level)
|
||||
missing module named keras.src.backend.is_tensor - imported by keras.src.backend (top-level), keras.src.ops (top-level)
|
||||
missing module named keras.src.backend.cond - imported by keras.src.backend (top-level), keras.src.ops (top-level)
|
||||
missing module named keras.src.backend.cast - imported by keras.src.backend (top-level), keras.src.ops (top-level)
|
||||
missing module named keras.src.engine - imported by keras.src (conditional), nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named flax - imported by keras.src.utils.jax_layer (delayed)
|
||||
missing module named array_api_strict - imported by sklearn.utils._array_api (delayed, conditional, optional)
|
||||
missing module named sklearn.externals.array_api_compat.common.array_namespace - imported by sklearn.externals.array_api_compat.common (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named cupy_backends - imported by sklearn.externals.array_api_compat.common._helpers (delayed)
|
||||
missing module named torch.outer - imported by torch (top-level), sklearn.externals.array_api_compat.torch.linalg (top-level)
|
||||
missing module named 'cupy.linalg' - imported by sklearn.externals.array_api_compat.cupy.linalg (top-level)
|
||||
missing module named 'cupy.fft' - imported by sklearn.externals.array_api_compat.cupy.fft (top-level)
|
||||
missing module named array_api_compat - imported by sklearn.externals.array_api_extra._lib._utils._compat (optional)
|
||||
missing module named 'numpydoc.docscrape' - imported by sklearn.utils._testing (delayed), skimage._shared.utils (delayed, optional)
|
||||
missing module named numpydoc - imported by sklearn.utils._testing (delayed, optional)
|
||||
missing module named 'distributed.utils' - imported by joblib._dask (conditional, optional)
|
||||
missing module named 'dask.utils' - imported by joblib._dask (conditional)
|
||||
missing module named 'dask.sizeof' - imported by joblib._dask (conditional)
|
||||
missing module named 'dask.distributed' - imported by joblib._dask (conditional)
|
||||
missing module named viztracer - imported by joblib.externals.loky.initializers (delayed, optional)
|
||||
missing module named 'lz4.frame' - imported by joblib.compressor (optional)
|
||||
missing module named pyamg - imported by sklearn.manifold._spectral_embedding (delayed, conditional, optional)
|
||||
missing module named keras.engine - imported by keras (conditional), nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'tf_keras.optimizers' - imported by tensorflow.python.saved_model.load (delayed, conditional, optional)
|
||||
missing module named tf_keras - imported by tensorflow.python.util.lazy_loader (delayed, conditional, optional), tensorflow.python.saved_model.load (delayed, conditional, optional), huggingface_hub.keras_mixin (conditional, optional)
|
||||
missing module named objgraph - imported by tensorflow.python.distribute.test_util (optional)
|
||||
missing module named tblib - imported by tensorflow.python.distribute.multi_process_runner (optional)
|
||||
missing module named tensorflow.python.framework.fast_tensor_util - imported by tensorflow.python.framework (optional), tensorflow.python.framework.tensor_util (optional)
|
||||
missing module named portpicker - imported by tensorflow.python.framework.test_util (delayed), tensorflow.dtensor.python.tests.multi_client_test_util (top-level), tensorflow.python.debug.lib.grpc_debug_test_server (top-level)
|
||||
missing module named 'tensorflow.python.framework.is_mlir_bridge_test_true' - imported by tensorflow.python.framework.test_util (optional)
|
||||
missing module named 'tensorflow.python.framework.is_mlir_bridge_test_false' - imported by tensorflow.python.framework.test_util (optional)
|
||||
missing module named 'tensorflow.python.framework.is_xla_test_true' - imported by tensorflow.python.framework.test_util (optional)
|
||||
missing module named tensorflow.python.keras.__version__ - imported by tensorflow.python.keras (delayed), tensorflow.python.keras.saving.saving_utils (delayed), tensorflow.python.keras.saving.hdf5_format (delayed), tensorflow.python.keras.engine.training (delayed)
|
||||
missing module named tensorflow.python.keras.layers.wrappers - imported by tensorflow.python.keras.layers (delayed), tensorflow.python.keras.utils.vis_utils (delayed)
|
||||
missing module named 'six.moves.urllib.request' - imported by tensorflow.python.keras.utils.data_utils (top-level)
|
||||
missing module named 'tensorflow.python.training.tracking' - imported by openvino.frontend.tensorflow.utils (delayed, optional)
|
||||
missing module named paddle - imported by openvino.tools.ovc.moc_frontend.shape_utils (delayed, conditional), openvino.tools.ovc.moc_frontend.type_utils (delayed, conditional), openvino.tools.ovc.moc_frontend.paddle_frontend_utils (delayed, optional), openvino.tools.ovc.convert_impl (delayed, conditional)
|
||||
missing module named 'conda.cli' - imported by torch.utils.benchmark.examples.blas_compare_setup (optional)
|
||||
missing module named conda - imported by torch.utils.benchmark.examples.blas_compare_setup (optional)
|
||||
missing module named 'hypothesis.strategies' - imported by torch.testing._internal.hypothesis_utils (top-level)
|
||||
missing module named 'hypothesis.extra' - imported by torch.testing._internal.hypothesis_utils (top-level)
|
||||
missing module named torch.tensor - imported by torch (top-level), torch.utils.benchmark.utils.compare (top-level)
|
||||
missing module named torch.TensorType - imported by torch (top-level), torch.jit._passes._property_propagation (top-level)
|
||||
missing module named 'torch._C._distributed_rpc_testing' - imported by torch.distributed.rpc._testing (conditional)
|
||||
missing module named etcd - imported by torch.distributed.elastic.rendezvous.etcd_rendezvous (top-level), torch.distributed.elastic.rendezvous.etcd_store (top-level), torch.distributed.elastic.rendezvous.etcd_rendezvous_backend (top-level), torch.distributed.elastic.rendezvous.etcd_server (optional)
|
||||
missing module named 'torch.distributed.elastic.metrics.static_init' - imported by torch.distributed.elastic.metrics (optional)
|
||||
missing module named 'coremltools.models' - imported by torch.backends._coreml.preprocess (top-level)
|
||||
missing module named 'coremltools.converters' - imported by torch.backends._coreml.preprocess (top-level)
|
||||
missing module named coremltools - imported by torch.backends._coreml.preprocess (top-level)
|
||||
missing module named pytorch_lightning - imported by torch.ao.pruning._experimental.data_sparsifier.lightning.callbacks.data_sparsity (top-level)
|
||||
missing module named fbscribelogger - imported by torch._logging.scribe (optional)
|
||||
missing module named 'tvm.contrib' - imported by torch._dynamo.backends.tvm (delayed)
|
||||
missing module named tvm - imported by torch._dynamo.backends.tvm (delayed, conditional)
|
||||
missing module named 'torch._C._VariableFunctions' - imported by torch (conditional)
|
||||
missing module named 'tensorflow.contrib' - imported by tensorflow.python.tools.import_pb_to_tensorboard (optional)
|
||||
missing module named memory_profiler - imported by tensorflow.python.eager.memory_tests.memory_test_util (optional)
|
||||
missing module named six.moves.urllib.request - imported by six.moves.urllib (top-level), tensorflow.python.distribute.failure_handling.failure_handling_util (top-level)
|
||||
missing module named grpc_reflection - imported by grpc (optional)
|
||||
missing module named grpc_health - imported by grpc (optional)
|
||||
missing module named grpc_tools - imported by grpc._runtime_protos (delayed, optional), grpc (optional)
|
||||
missing module named 'grpc_tools.protoc' - imported by grpc._runtime_protos (delayed, conditional)
|
||||
missing module named tflite_runtime - imported by tensorflow.lite.python.metrics.metrics (conditional), tensorflow.lite.python.interpreter (conditional), tensorflow.lite.python.analyzer (conditional), tensorflow.lite.tools.visualize (conditional)
|
||||
missing module named awq - imported by openvino.frontend.pytorch.quantized (delayed, conditional, optional)
|
||||
missing module named 'transformers.pytorch_utils' - imported by openvino.frontend.pytorch.patch_model (delayed, optional)
|
||||
missing module named 'jax.lax' - imported by openvino.frontend.jax.passes (top-level)
|
||||
missing module named 'jax.core' - imported by openvino.frontend.jax.jaxpr_decoder (top-level)
|
||||
missing module named 'keras.src.utils.control_flow_util' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.src.engine.keras_tensor' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.utils.control_flow_util' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.engine.keras_tensor' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named rpds.List - imported by rpds (top-level), referencing._core (top-level)
|
||||
missing module named rpds.HashTrieSet - imported by rpds (top-level), referencing._core (top-level)
|
||||
missing module named rpds.HashTrieMap - imported by rpds (top-level), referencing._core (top-level), jsonschema._types (top-level), jsonschema.validators (top-level)
|
||||
missing module named isoduration - imported by jsonschema._format (top-level)
|
||||
missing module named uri_template - imported by jsonschema._format (top-level)
|
||||
missing module named jsonpointer - imported by jsonschema._format (top-level)
|
||||
missing module named webcolors - imported by jsonschema._format (top-level)
|
||||
missing module named rfc3339_validator - imported by jsonschema._format (top-level)
|
||||
missing module named rfc3986_validator - imported by jsonschema._format (optional)
|
||||
missing module named rfc3987 - imported by jsonschema._format (optional)
|
||||
missing module named fqdn - imported by jsonschema._format (top-level)
|
||||
missing module named openvino.properties.hint.inference_precision - imported by openvino.properties.hint (top-level), nncf.quantization.algorithms.accuracy_control.openvino_backend (top-level), nncf.openvino.engine (top-level)
|
||||
missing module named 'openvino._pyopenvino.properties' - imported by openvino.runtime.properties (top-level), openvino.runtime.properties.hint (top-level), openvino.properties (top-level), openvino.properties.hint (top-level), openvino.properties.intel_cpu (top-level), openvino.properties.intel_gpu (top-level), openvino.properties.intel_auto (top-level), openvino.properties.device (top-level), openvino.properties.log (top-level), openvino.properties.streams (top-level), nncf.openvino.optimized_functions.models (top-level)
|
||||
missing module named 'openvino._pyopenvino._offline_transformations' - imported by openvino._offline_transformations (top-level)
|
||||
missing module named 'transformers.utils' - imported by nncf.data.generators (delayed, optional)
|
||||
missing module named icu - imported by natsort.compat.locale (optional), natsort.natsort (conditional, optional)
|
||||
missing module named fastnumbers - imported by natsort.compat.fastnumbers (conditional, optional)
|
||||
missing module named 'openvino._pyopenvino.preprocess' - imported by openvino.preprocess (top-level)
|
||||
missing module named gitdb_speedups - imported by gitdb.fun (optional)
|
||||
missing module named 'gitdb_speedups._perf' - imported by gitdb.stream (optional), gitdb.pack (optional)
|
||||
missing module named sha - imported by gitdb.util (delayed, optional)
|
||||
missing module named _watchdog_fsevents - imported by watchdog.observers.fsevents (top-level)
|
||||
missing module named polars - imported by narwhals.dependencies (conditional), narwhals.utils (delayed, conditional), narwhals.schema (delayed, conditional), narwhals._compliant.series (conditional), narwhals._arrow.dataframe (delayed, conditional), narwhals._pandas_like.series (delayed, conditional), narwhals._pandas_like.dataframe (delayed, conditional), narwhals._polars.dataframe (top-level), narwhals._polars.namespace (top-level), narwhals._polars.expr (top-level), narwhals._polars.utils (top-level), narwhals._polars.series (top-level), narwhals._dask.dataframe (delayed, conditional), narwhals._duckdb.dataframe (delayed, conditional), narwhals._arrow.series (delayed, conditional), narwhals.series (conditional), narwhals.dataframe (conditional), narwhals._compliant.dataframe (conditional), narwhals._namespace (conditional), narwhals._ibis.dataframe (delayed, conditional), narwhals._spark_like.dataframe (delayed, conditional), streamlit.dataframe_util (delayed, conditional), streamlit.runtime.caching.hashing (delayed, conditional)
|
||||
missing module named xarray - imported by plotly.express._imshow (optional), streamlit.dataframe_util (delayed, conditional)
|
||||
missing module named 'authlib.jose' - imported by streamlit.auth_util (delayed, optional)
|
||||
missing module named sniffio - imported by tenacity.asyncio (delayed, conditional)
|
||||
missing module named trio - imported by tenacity.asyncio (delayed, conditional)
|
||||
missing module named 'sqlalchemy.exc' - imported by streamlit.connections.sql_connection (delayed)
|
||||
missing module named 'sqlalchemy.orm' - imported by streamlit.connections.sql_connection (delayed, conditional)
|
||||
missing module named snowflake - imported by streamlit.connections.util (delayed, optional)
|
||||
missing module named 'snowflake.snowpark' - imported by streamlit.connections.snowflake_connection (delayed, conditional), streamlit.connections.snowpark_connection (delayed, conditional)
|
||||
missing module named 'snowflake.connector' - imported by streamlit.connections.snowflake_connection (delayed, conditional)
|
||||
missing module named 'pyarrow._stubs_typing' - imported by narwhals._arrow.typing (conditional)
|
||||
missing module named 'pyarrow.__lib_pxi' - imported by narwhals._arrow.typing (conditional)
|
||||
missing module named dask_expr - imported by narwhals._dask.utils (conditional, optional), narwhals._dask.group_by (conditional, optional)
|
||||
missing module named 'polars.lazyframe' - imported by narwhals._polars.group_by (conditional)
|
||||
missing module named 'polars.dataframe' - imported by narwhals._polars.group_by (conditional)
|
||||
missing module named 'duckdb.typing' - imported by narwhals._duckdb.utils (conditional), narwhals._duckdb.expr (top-level), narwhals._duckdb.namespace (top-level), narwhals._duckdb.dataframe (conditional)
|
||||
missing module named 'sqlframe._version' - imported by narwhals.utils (delayed, conditional)
|
||||
missing module named ibis - imported by narwhals.dependencies (conditional), narwhals.utils (delayed, conditional), narwhals._ibis.namespace (top-level), narwhals._ibis.dataframe (top-level), narwhals._ibis.utils (top-level), narwhals._ibis.expr (top-level)
|
||||
missing module named sqlframe - imported by narwhals.utils (delayed, conditional)
|
||||
missing module named duckdb - imported by narwhals.dependencies (conditional), narwhals._arrow.dataframe (delayed, conditional), narwhals._duckdb.dataframe (top-level), narwhals._duckdb.utils (top-level), narwhals._duckdb.expr (top-level), narwhals._duckdb.expr_dt (top-level), narwhals._duckdb.expr_list (top-level), narwhals._duckdb.expr_str (top-level), narwhals._duckdb.expr_struct (top-level), narwhals._duckdb.namespace (top-level), narwhals._duckdb.selectors (conditional), narwhals._duckdb.group_by (conditional), narwhals._duckdb.series (conditional), narwhals._polars.dataframe (delayed, conditional), narwhals._pandas_like.dataframe (delayed, conditional), narwhals.utils (delayed, conditional), narwhals._namespace (conditional)
|
||||
missing module named 'dask.dataframe' - imported by narwhals.dependencies (conditional), narwhals._dask.namespace (top-level), narwhals._polars.dataframe (delayed, conditional), narwhals._dask.dataframe (top-level), narwhals._dask.utils (conditional, optional), narwhals._dask.expr_dt (conditional), narwhals._dask.expr_str (top-level), narwhals._dask.expr (conditional), narwhals._dask.group_by (top-level), narwhals._pandas_like.dataframe (delayed, conditional), narwhals._arrow.dataframe (delayed, conditional), narwhals._dask.selectors (conditional), narwhals.utils (delayed, conditional)
|
||||
missing module named 'pyspark.sql' - imported by narwhals.dependencies (delayed, conditional, optional), narwhals.utils (delayed, conditional), narwhals._namespace (conditional), narwhals._spark_like.utils (delayed, conditional)
|
||||
missing module named cudf - imported by narwhals.dependencies (conditional), narwhals.utils (delayed, conditional)
|
||||
missing module named 'modin.pandas' - imported by narwhals._pandas_like.dataframe (delayed, conditional), narwhals.utils (delayed, conditional)
|
||||
missing module named 'sqlframe.base' - imported by narwhals._spark_like.utils (delayed, conditional), narwhals._spark_like.expr_dt (conditional), narwhals._spark_like.expr_str (conditional), narwhals._spark_like.expr_struct (conditional), narwhals._spark_like.expr (delayed, conditional), narwhals._spark_like.selectors (conditional), narwhals._spark_like.namespace (conditional), narwhals._spark_like.dataframe (delayed, conditional), narwhals._spark_like.group_by (conditional), narwhals.dependencies (delayed, conditional)
|
||||
missing module named 'ibis.selectors' - imported by narwhals._ibis.dataframe (delayed)
|
||||
missing module named 'ibis.expr' - imported by narwhals._ibis.namespace (top-level), narwhals._ibis.dataframe (top-level), narwhals._ibis.utils (top-level), narwhals._ibis.expr_dt (conditional), narwhals._ibis.expr_str (top-level), narwhals._ibis.expr_struct (conditional), narwhals._ibis.expr (conditional), narwhals._ibis.group_by (conditional), narwhals._ibis.selectors (conditional)
|
||||
missing module named pyspark - imported by narwhals.dependencies (conditional)
|
||||
missing module named modin - imported by narwhals.dependencies (conditional)
|
||||
missing module named 'vegafusion.runtime' - imported by altair.utils._vegafusion_data (conditional)
|
||||
missing module named altair.vegalite.SCHEMA_VERSION - imported by altair.vegalite (delayed), altair.utils._importers (delayed)
|
||||
missing module named vl_convert - imported by altair.utils._importers (delayed, optional)
|
||||
missing module named vegafusion - imported by altair.utils._importers (delayed, optional)
|
||||
missing module named altair.vegalite.v5.SCHEMA_VERSION - imported by altair.vegalite.v5 (delayed), altair.vegalite.v5.compiler (delayed)
|
||||
missing module named anywidget - imported by plotly.basewidget (top-level), altair.jupyter (optional), altair.jupyter.jupyter_chart (top-level)
|
||||
missing module named altair.VConcatSpecGenericSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.VConcatChart - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.UnitSpecWithFrame - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.UnitSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelVConcatSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelUnitSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelLayerSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelHConcatSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelFacetSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelConcatSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.NonNormalizedSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.LayerSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.LayerChart - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.HConcatSpecGenericSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.HConcatChart - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.FacetSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.FacetedUnitSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.FacetChart - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.ConcatSpecGenericSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.ConcatChart - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.Chart - imported by altair (delayed), altair.vegalite.v5.display (delayed), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.renderers - imported by altair (delayed), altair.utils.mimebundle (delayed)
|
||||
missing module named altair.vegalite_compilers - imported by altair (delayed), altair.utils._vegafusion_data (delayed)
|
||||
missing module named altair.data_transformers - imported by altair (delayed), altair.utils._vegafusion_data (delayed), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.SchemaBase - imported by altair (conditional), altair.vegalite.v5.schema.channels (conditional)
|
||||
missing module named altair.Parameter - imported by altair (conditional), altair.vegalite.v5.schema.core (conditional), altair.vegalite.v5.schema.channels (conditional), altair.vegalite.v5.schema.mixins (conditional)
|
||||
missing module named skimage.measure.block_reduce - imported by skimage.measure (top-level), skimage.transform._warps (top-level)
|
||||
missing module named skimage.measure.label - imported by skimage.measure (top-level), skimage.restoration.inpaint (top-level)
|
||||
missing module named skimage.exposure.histogram - imported by skimage.exposure (top-level), skimage.filters.thresholding (top-level)
|
||||
missing module named skimage.exposure.is_low_contrast - imported by skimage.exposure (top-level), skimage.io._io (top-level), skimage.io._plugins.matplotlib_plugin (top-level)
|
||||
missing module named skimage.color.rgba2rgb - imported by skimage.color (delayed, conditional), skimage.exposure.exposure (delayed, conditional)
|
||||
missing module named skimage.color.rgb2gray - imported by skimage.color (top-level), skimage.measure._blur_effect (top-level), skimage.exposure.exposure (delayed, conditional)
|
||||
missing module named skimage.color.gray2rgb - imported by skimage.color (top-level), skimage.feature._daisy (top-level), skimage.feature.haar (top-level), skimage.feature.texture (top-level)
|
||||
missing module named skimage.transform.integral_image - imported by skimage.transform (top-level), skimage.feature.corner (top-level), skimage.filters.thresholding (top-level), skimage.feature.blob (top-level), skimage.feature.censure (top-level)
|
||||
missing module named skimage.transform.rescale - imported by skimage.transform (top-level), skimage.feature.sift (top-level)
|
||||
missing module named skimage.transform.pyramid_gaussian - imported by skimage.transform (top-level), skimage.feature.orb (top-level)
|
||||
missing module named skimage.draw.rectangle - imported by skimage.draw (top-level), skimage.feature.haar (top-level)
|
||||
missing module named skimage.transform.warp - imported by skimage.transform (top-level), skimage.filters._window (top-level)
|
||||
missing module named pooch - imported by skimage.data._fetchers (delayed, optional)
|
||||
missing module named 'zarr.core' - imported by tifffile.zarr (delayed, conditional, optional)
|
||||
missing module named 'zarr.abc' - imported by tifffile.zarr (optional)
|
||||
missing module named zarr - imported by tifffile.zarr (top-level)
|
||||
missing module named _imagecodecs - imported by tifffile.tifffile (delayed, conditional, optional)
|
||||
missing module named imagecodecs - imported by tifffile.tifffile (optional), imageio.plugins._tifffile (delayed, conditional, optional)
|
||||
missing module named compression - imported by tifffile._imagecodecs (delayed, optional)
|
||||
missing module named SimpleITK - imported by skimage.io._plugins.simpleitk_plugin (optional), imageio.plugins.simpleitk (delayed, optional)
|
||||
missing module named imread - imported by skimage.io._plugins.imread_plugin (optional)
|
||||
missing module named itk - imported by imageio.plugins.simpleitk (delayed, optional)
|
||||
missing module named rawpy - imported by imageio.plugins.rawpy (top-level)
|
||||
missing module named pillow_heif - imported by imageio.plugins.pillow (delayed, optional)
|
||||
missing module named 'osgeo.gdal' - imported by imageio.plugins.gdal (delayed, optional)
|
||||
missing module named 'astropy.io' - imported by imageio.plugins.fits (delayed, optional)
|
||||
missing module named imageio_ffmpeg - imported by imageio.plugins.ffmpeg (top-level)
|
||||
missing module named tkFileDialog - imported by imageio.plugins._tifffile (delayed, optional)
|
||||
missing module named Tkinter - imported by imageio.plugins._tifffile (delayed, optional)
|
||||
missing module named tifffile_geodb - imported by imageio.plugins._tifffile (delayed, optional)
|
||||
missing module named imageio.plugins.tifffile_geodb - imported by imageio.plugins._tifffile (delayed, optional)
|
||||
missing module named zstd - imported by imageio.plugins._tifffile (delayed, conditional, optional)
|
||||
missing module named 'backports.lzma' - imported by imageio.plugins._tifffile (delayed, conditional, optional)
|
||||
missing module named bsdf_cli - imported by imageio.plugins._bsdf (conditional)
|
||||
missing module named osgeo - imported by skimage.io._plugins.gdal_plugin (optional)
|
||||
missing module named astropy - imported by skimage.io._plugins.fits_plugin (optional)
|
||||
missing module named skimage.metrics.mean_squared_error - imported by skimage.metrics (top-level), skimage.restoration.j_invariant (top-level)
missing module named pywt - imported by skimage.restoration._denoise (delayed, optional)
missing module named skimage.filters.sobel - imported by skimage.filters (delayed), skimage.measure._blur_effect (delayed)
missing module named BaseHTTPServer - imported by plotly.io._base_renderers (optional)
missing module named 'statsmodels.api' - imported by plotly.express.trendline_functions (delayed)
missing module named statsmodels - imported by plotly.express.trendline_functions (delayed)
missing module named plotly.colors.sequential - imported by plotly.colors (top-level), plotly.express._core (top-level)
missing module named plotly.colors.qualitative - imported by plotly.colors (top-level), plotly.express._core (top-level)
missing module named plotly.colors.validate_scale_values - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.validate_colorscale - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.validate_colors_dict - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.validate_colors - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.unlabel_rgb - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.unconvert_from_RGB_255 - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.n_colors - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.label_rgb - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.hex_to_rgb - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.find_intermediate_color - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.convert_to_RGB_255 - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.colorscale_to_scale - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.colorscale_to_colors - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.color_parser - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.PLOTLY_SCALES - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named plotly.colors.DEFAULT_PLOTLY_COLORS - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
missing module named 'plotly.version' - imported by plotly (conditional)
missing module named choreographer - imported by plotly.io._kaleido (delayed, conditional)
missing module named 'kaleido.errors' - imported by plotly.io._kaleido (delayed, conditional)
missing module named 'kaleido.scopes' - imported by plotly.io._kaleido (conditional, optional)
missing module named kaleido - imported by plotly.io._kaleido (delayed, conditional, optional)
missing module named graphviz - imported by streamlit.type_util (conditional), streamlit.elements.graphviz_chart (conditional)
missing module named 'bokeh.embed' - imported by streamlit.elements.bokeh_chart (delayed)
missing module named bokeh - imported by streamlit.elements.bokeh_chart (delayed, conditional)
missing module named ui - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
missing module named splash - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
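Note: unlike the third-party entries above, ui and splash are the application's own modules; they are reported as (delayed, optional) because main.py imports them inside a function and inside a try/except block. A minimal sketch of the import shape that produces exactly this classification (a hypothetical reconstruction, not the actual main.py source):

def load_optional_components():
    # "delayed"  -> the import only runs when this function is called
    # "optional" -> the import is wrapped in try/except ImportError
    try:
        import ui       # local module name taken from the report above
        import splash   # local module name taken from the report above
    except ImportError:
        ui = None
        splash = None
    return ui, splash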
BIN
qt_app_pyside1/build/FixedDebug/xref-FixedDebug.html
LFS
Normal file
BIN
qt_app_pyside1/build/FixedDebug/xref-FixedDebug.html
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/Analysis-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/QuickDebug/Analysis-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/EXE-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/QuickDebug/EXE-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/PKG-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/QuickDebug/PKG-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/PYZ-00.pyz
LFS
Normal file
BIN
qt_app_pyside1/build/QuickDebug/PYZ-00.pyz
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/PYZ-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/QuickDebug/PYZ-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/QuickDebug.pkg
LFS
Normal file
BIN
qt_app_pyside1/build/QuickDebug/QuickDebug.pkg
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/base_library.zip
Normal file
BIN
qt_app_pyside1/build/QuickDebug/base_library.zip
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod01_archive.pyc
Normal file
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod01_archive.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod02_importers.pyc
Normal file
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod02_importers.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod03_ctypes.pyc
Normal file
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod03_ctypes.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod04_pywin32.pyc
Normal file
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod04_pywin32.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/localpycs/struct.pyc
Normal file
BIN
qt_app_pyside1/build/QuickDebug/localpycs/struct.pyc
Normal file
Binary file not shown.
28
qt_app_pyside1/build/QuickDebug/warn-QuickDebug.txt
Normal file
28
qt_app_pyside1/build/QuickDebug/warn-QuickDebug.txt
Normal file
@@ -0,0 +1,28 @@

This file lists modules PyInstaller was not able to find. This does not
necessarily mean this module is required for running your program. Python and
Python 3rd-party packages include a lot of conditional or optional modules. For
example the module 'ntpath' only exists on Windows, whereas the module
'posixpath' only exists on Posix systems.

Types of import:
* top-level: imported at the top-level - look at these first
* conditional: imported within an if-statement
* delayed: imported within a function
* optional: imported within a try-except-statement

IMPORTANT: Do NOT post this list to the issue-tracker. Use it as a basis for
tracking down the missing module yourself. Thanks!

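For reference, the four categories above correspond to ordinary Python import shapes. A minimal illustration (the module choices are arbitrary examples, not taken from this project):

import os                       # top-level: executed whenever the module is loaded

if os.name == "nt":             # conditional: only reached when this branch runs
    import msvcrt

def write_report(rows, path):
    import csv                  # delayed: resolved the first time the function is called
    with open(path, "w", newline="") as fh:
        csv.writer(fh).writerows(rows)

try:
    import simplejson as json   # optional: a fallback exists if the package is absent
except ImportError:
    import json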
missing module named 'org.python' - imported by copy (optional)
missing module named org - imported by pickle (optional)
missing module named pwd - imported by posixpath (delayed, conditional, optional), shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional)
missing module named grp - imported by shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional)
missing module named posix - imported by os (conditional, optional), posixpath (optional), shutil (conditional), importlib._bootstrap_external (conditional)
missing module named resource - imported by posix (top-level)
missing module named _frozen_importlib_external - imported by importlib._bootstrap (delayed), importlib (optional), importlib.abc (optional)
excluded module named _frozen_importlib - imported by importlib (optional), importlib.abc (optional)
missing module named ui - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
missing module named splash - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
missing module named _posixsubprocess - imported by subprocess (conditional)
missing module named fcntl - imported by subprocess (optional)
BIN
qt_app_pyside1/build/QuickDebug/xref-QuickDebug.html
LFS
Normal file
BIN
qt_app_pyside1/build/QuickDebug/xref-QuickDebug.html
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/Analysis-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/TrafficMonitor/Analysis-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/EXE-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/TrafficMonitor/EXE-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/PKG-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/TrafficMonitor/PKG-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/PYZ-00.pyz
LFS
Normal file
BIN
qt_app_pyside1/build/TrafficMonitor/PYZ-00.pyz
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/PYZ-00.toc
LFS
Normal file
BIN
qt_app_pyside1/build/TrafficMonitor/PYZ-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/TrafficMonitor.pkg
LFS
Normal file
BIN
qt_app_pyside1/build/TrafficMonitor/TrafficMonitor.pkg
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/base_library.zip
Normal file
BIN
qt_app_pyside1/build/TrafficMonitor/base_library.zip
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/localpycs/struct.pyc
Normal file
BIN
qt_app_pyside1/build/TrafficMonitor/localpycs/struct.pyc
Normal file
Binary file not shown.
773
qt_app_pyside1/build/TrafficMonitor/warn-TrafficMonitor.txt
Normal file
773
qt_app_pyside1/build/TrafficMonitor/warn-TrafficMonitor.txt
Normal file
@@ -0,0 +1,773 @@

This file lists modules PyInstaller was not able to find. This does not
necessarily mean this module is required for running your program. Python and
Python 3rd-party packages include a lot of conditional or optional modules. For
example the module 'ntpath' only exists on Windows, whereas the module
'posixpath' only exists on Posix systems.

Types of import:
* top-level: imported at the top-level - look at these first
* conditional: imported within an if-statement
* delayed: imported within a function
* optional: imported within a try-except-statement

IMPORTANT: Do NOT post this list to the issue-tracker. Use it as a basis for
tracking down the missing module yourself. Thanks!

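Where an entry below is genuinely needed at runtime (the project's own ui and splash modules listed further down are the most likely candidates), the usual remedy is to declare it as a hidden import so PyInstaller bundles it despite the static scan missing it. A sketch of the relevant fragment of a .spec file, with illustrative paths and only the pertinent argument filled in:

# TrafficMonitor.spec (fragment) -- Analysis is injected by PyInstaller when it
# executes the spec, so no import statement is needed here.
a = Analysis(
    ['main.py'],                      # entry script; adjust to the actual path
    pathex=[],
    hiddenimports=['ui', 'splash'],   # modules the static analysis could not find
)

The same effect is available from the command line: pyinstaller --hidden-import ui --hidden-import splash main.py.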
missing module named usercustomize - imported by site (delayed, optional)
|
||||
missing module named sitecustomize - imported by site (delayed, optional)
|
||||
missing module named 'org.python' - imported by copy (optional), xml.sax (delayed, conditional), setuptools.sandbox (conditional)
|
||||
missing module named org - imported by pickle (optional)
|
||||
missing module named pwd - imported by posixpath (delayed, conditional, optional), shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional), http.server (delayed, optional), webbrowser (delayed), psutil (optional), netrc (delayed, conditional), getpass (delayed), distutils.util (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), distutils.archive_util (optional), setuptools._distutils.util (delayed, conditional, optional), setuptools._distutils.archive_util (optional)
|
||||
missing module named grp - imported by shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), distutils.archive_util (optional), setuptools._distutils.archive_util (optional)
|
||||
missing module named posix - imported by os (conditional, optional), posixpath (optional), shutil (conditional), importlib._bootstrap_external (conditional)
|
||||
missing module named resource - imported by posix (top-level), fsspec.asyn (conditional, optional), torch._inductor.codecache (delayed, conditional)
|
||||
missing module named _frozen_importlib_external - imported by importlib._bootstrap (delayed), importlib (optional), importlib.abc (optional), zipimport (top-level)
|
||||
excluded module named _frozen_importlib - imported by importlib (optional), importlib.abc (optional), zipimport (top-level)
|
||||
missing module named _posixsubprocess - imported by subprocess (conditional), multiprocessing.util (delayed), joblib.externals.loky.backend.fork_exec (delayed)
|
||||
missing module named fcntl - imported by subprocess (optional), xmlrpc.server (optional), tqdm.utils (delayed, optional), filelock._unix (conditional, optional), absl.flags._helpers (optional), pty (delayed, optional), torch.testing._internal.distributed.distributed_test (conditional)
|
||||
missing module named win32evtlog - imported by logging.handlers (delayed, optional)
|
||||
missing module named win32evtlogutil - imported by logging.handlers (delayed, optional)
|
||||
missing module named startup - imported by pyreadline3.keysyms.common (conditional), pyreadline3.keysyms.keysyms (conditional)
|
||||
missing module named sets - imported by pyreadline3.keysyms.common (optional), pytz.tzinfo (optional)
|
||||
missing module named System - imported by pyreadline3.clipboard.ironpython_clipboard (top-level), pyreadline3.keysyms.ironpython_keysyms (top-level), pyreadline3.console.ironpython_console (top-level), pyreadline3.rlmain (conditional)
|
||||
missing module named console - imported by pyreadline3.console.ansi (conditional)
|
||||
missing module named clr - imported by pyreadline3.clipboard.ironpython_clipboard (top-level), pyreadline3.console.ironpython_console (top-level)
|
||||
missing module named IronPythonConsole - imported by pyreadline3.console.ironpython_console (top-level)
|
||||
missing module named vms_lib - imported by platform (delayed, optional)
|
||||
missing module named 'java.lang' - imported by platform (delayed, optional), xml.sax._exceptions (conditional)
|
||||
missing module named java - imported by platform (delayed)
|
||||
missing module named _winreg - imported by platform (delayed, optional), pygments.formatters.img (optional)
|
||||
missing module named termios - imported by tty (top-level), getpass (optional), tqdm.utils (delayed, optional), absl.flags._helpers (optional)
|
||||
missing module named pyimod02_importers - imported by C:\Users\jatin\.conda\envs\traffic_monitor\Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgutil.py (delayed), C:\Users\jatin\.conda\envs\traffic_monitor\Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgres.py (delayed)
|
||||
missing module named _manylinux - imported by packaging._manylinux (delayed, optional), setuptools._vendor.packaging._manylinux (delayed, optional), wheel.vendored.packaging._manylinux (delayed, optional)
|
||||
missing module named '_typeshed.importlib' - imported by pkg_resources (conditional)
|
||||
missing module named _typeshed - imported by pkg_resources (conditional), setuptools.glob (conditional), setuptools.compat.py311 (conditional), torch.utils._backport_slots (conditional), setuptools._distutils.dist (conditional)
|
||||
missing module named jnius - imported by setuptools._vendor.platformdirs.android (delayed, conditional, optional)
|
||||
missing module named android - imported by setuptools._vendor.platformdirs.android (delayed, conditional, optional)
|
||||
missing module named _posixshmem - imported by multiprocessing.resource_tracker (conditional), multiprocessing.shared_memory (conditional)
|
||||
missing module named multiprocessing.set_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
|
||||
missing module named multiprocessing.get_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
|
||||
missing module named multiprocessing.get_context - imported by multiprocessing (top-level), multiprocessing.pool (top-level), multiprocessing.managers (top-level), multiprocessing.sharedctypes (top-level), joblib.externals.loky.backend.context (top-level)
|
||||
missing module named multiprocessing.TimeoutError - imported by multiprocessing (top-level), multiprocessing.pool (top-level), joblib.parallel (top-level)
|
||||
missing module named _scproxy - imported by urllib.request (conditional)
|
||||
missing module named multiprocessing.BufferTooShort - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
|
||||
missing module named multiprocessing.AuthenticationError - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
|
||||
missing module named multiprocessing.RLock - imported by multiprocessing (delayed, conditional, optional), tqdm.std (delayed, conditional, optional)
|
||||
missing module named multiprocessing.Pool - imported by multiprocessing (delayed, conditional), scipy._lib._util (delayed, conditional), torchvision.datasets.kinetics (top-level)
|
||||
missing module named asyncio.DefaultEventLoopPolicy - imported by asyncio (delayed, conditional), asyncio.events (delayed, conditional)
|
||||
missing module named 'distutils._modified' - imported by setuptools._distutils.file_util (delayed)
|
||||
missing module named 'distutils._log' - imported by setuptools._distutils.command.bdist_dumb (top-level), setuptools._distutils.command.bdist_rpm (top-level), setuptools._distutils.command.build_clib (top-level), setuptools._distutils.command.build_ext (top-level), setuptools._distutils.command.build_py (top-level), setuptools._distutils.command.build_scripts (top-level), setuptools._distutils.command.clean (top-level), setuptools._distutils.command.config (top-level), setuptools._distutils.command.install (top-level), setuptools._distutils.command.install_scripts (top-level), setuptools._distutils.command.sdist (top-level)
|
||||
missing module named trove_classifiers - imported by setuptools.config._validate_pyproject.formats (optional)
|
||||
missing module named importlib_resources - imported by setuptools._vendor.jaraco.text (optional), tqdm.cli (delayed, conditional, optional), jsonschema_specifications._core (optional)
|
||||
missing module named numpy.arccosh - imported by numpy (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.arcsinh - imported by numpy (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.arctan - imported by numpy (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.tan - imported by numpy (top-level), scipy.signal._spline_filters (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.greater - imported by numpy (top-level), scipy.optimize._minpack_py (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.power - imported by numpy (top-level), scipy.stats._kde (top-level)
|
||||
missing module named numpy.sinh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.cosh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.tanh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
|
||||
missing module named numpy.expm1 - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
|
||||
missing module named numpy.log1p - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
|
||||
missing module named numpy.ceil - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.log - imported by numpy (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._discrete_distns (top-level), scipy.stats._morestats (top-level), scipy.signal._waveforms (top-level)
|
||||
missing module named numpy.logical_and - imported by numpy (top-level), scipy.stats._distn_infrastructure (top-level)
|
||||
missing module named numpy.sign - imported by numpy (top-level), scipy.linalg._matfuncs (top-level)
|
||||
missing module named numpy.conjugate - imported by numpy (top-level), scipy.linalg._matfuncs (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.logical_not - imported by numpy (top-level), scipy.linalg._matfuncs (top-level)
|
||||
missing module named numpy.single - imported by numpy (top-level), scipy.linalg._decomp_schur (top-level)
|
||||
missing module named numpy.floor - imported by numpy (top-level), scipy.special._basic (top-level), scipy.special._orthogonal (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.arcsin - imported by numpy (top-level), scipy.linalg._decomp_svd (top-level)
|
||||
missing module named numpy.arccos - imported by numpy (top-level), scipy.linalg._decomp_svd (top-level), scipy.special._orthogonal (top-level)
|
||||
missing module named numpy.complex128 - imported by numpy (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.complex64 - imported by numpy (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.conj - imported by numpy (top-level), scipy.linalg._decomp (top-level), scipy.io._mmio (top-level)
|
||||
missing module named numpy.inexact - imported by numpy (top-level), scipy.linalg._decomp (top-level), scipy.special._basic (top-level), scipy.optimize._minpack_py (top-level)
|
||||
missing module named _dummy_thread - imported by numpy.core.arrayprint (optional), cffi.lock (conditional, optional), torch._jit_internal (optional)
|
||||
missing module named numpy.core.result_type - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.float_ - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.number - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.object_ - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.max - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.all - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.errstate - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.bool_ - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.inf - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.isnan - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.array2string - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.lib.imag - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.lib.real - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.lib.iscomplexobj - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.signbit - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.isscalar - imported by numpy.core (delayed), numpy.testing._private.utils (delayed), numpy.lib.polynomial (top-level)
|
||||
missing module named win32pdh - imported by numpy.testing._private.utils (delayed, conditional)
|
||||
missing module named numpy.core.array - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.isnat - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.ndarray - imported by numpy.core (top-level), numpy.testing._private.utils (top-level), numpy.lib.utils (top-level)
|
||||
missing module named numpy.core.array_repr - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.arange - imported by numpy.core (top-level), numpy.testing._private.utils (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.empty - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.float32 - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.intp - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.linspace - imported by numpy.core (top-level), numpy.lib.index_tricks (top-level)
|
||||
missing module named numpy.core.iinfo - imported by numpy.core (top-level), numpy.lib.twodim_base (top-level)
|
||||
missing module named numpy.core.transpose - imported by numpy.core (top-level), numpy.lib.function_base (top-level)
|
||||
missing module named numpy._typing._ufunc - imported by numpy._typing (conditional)
|
||||
missing module named numpy.uint - imported by numpy (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level)
|
||||
missing module named numpy.core.asarray - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.utils (top-level), numpy.fft._pocketfft (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.integer - imported by numpy.core (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.sqrt - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.conjugate - imported by numpy.core (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.swapaxes - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.zeros - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.reciprocal - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.sort - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.argsort - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.sign - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.count_nonzero - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.divide - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.matmul - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.asanyarray - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.atleast_2d - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.prod - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.amax - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.amin - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.moveaxis - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.geterrobj - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.finfo - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.isfinite - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.sum - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.multiply - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.add - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.dot - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.Inf - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.newaxis - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.complexfloating - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.inexact - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.cdouble - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.csingle - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.double - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.single - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.intc - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.empty_like - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named pyodide_js - imported by threadpoolctl (delayed, optional)
|
||||
missing module named numpy.core.ufunc - imported by numpy.core (top-level), numpy.lib.utils (top-level)
|
||||
missing module named numpy.core.ones - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.hstack - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.atleast_1d - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.atleast_3d - imported by numpy.core (top-level), numpy.lib.shape_base (top-level)
|
||||
missing module named numpy.core.vstack - imported by numpy.core (top-level), numpy.lib.shape_base (top-level)
|
||||
missing module named pickle5 - imported by numpy.compat.py3k (optional)
|
||||
missing module named numpy.eye - imported by numpy (delayed), numpy.core.numeric (delayed), scipy.optimize._optimize (top-level), scipy.linalg._decomp (top-level), scipy.interpolate._pade (top-level), scipy.signal._lti_conversion (top-level)
|
||||
missing module named numpy.recarray - imported by numpy (top-level), numpy.lib.recfunctions (top-level), numpy.ma.mrecords (top-level)
|
||||
missing module named numpy.expand_dims - imported by numpy (top-level), numpy.ma.core (top-level)
|
||||
missing module named numpy.array - imported by numpy (top-level), numpy.ma.core (top-level), numpy.ma.extras (top-level), numpy.ma.mrecords (top-level), scipy.linalg._decomp (top-level), scipy.sparse.linalg._isolve.utils (top-level), scipy.linalg._decomp_schur (top-level), scipy.stats._stats_py (top-level), scipy.interpolate._interpolate (top-level), scipy.interpolate._fitpack_impl (top-level), scipy.interpolate._fitpack2 (top-level), scipy.integrate._ode (top-level), scipy._lib._finite_differences (top-level), scipy.stats._morestats (top-level), scipy.optimize._lbfgsb_py (top-level), scipy.optimize._tnc (top-level), scipy.optimize._slsqp_py (top-level), dill._objects (optional), scipy.io._netcdf (top-level), scipy.signal._spline_filters (top-level), scipy.signal._filter_design (top-level), scipy.signal._lti_conversion (top-level)
|
||||
missing module named numpy.iscomplexobj - imported by numpy (top-level), numpy.ma.core (top-level), scipy.linalg._decomp (top-level), scipy.linalg._decomp_ldl (top-level)
|
||||
missing module named numpy.amin - imported by numpy (top-level), numpy.ma.core (top-level), scipy.stats._morestats (top-level)
|
||||
missing module named numpy.amax - imported by numpy (top-level), numpy.ma.core (top-level), scipy.linalg._matfuncs (top-level), scipy.stats._morestats (top-level)
|
||||
missing module named numpy.isinf - imported by numpy (top-level), numpy.testing._private.utils (top-level), scipy.stats._distn_infrastructure (top-level)
|
||||
missing module named numpy.isnan - imported by numpy (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.isfinite - imported by numpy (top-level), numpy.testing._private.utils (top-level), scipy.linalg._decomp (top-level), scipy.linalg._matfuncs (top-level), scipy.optimize._slsqp_py (top-level)
|
||||
missing module named numpy.float64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), scipy.stats._mstats_extras (top-level), scipy.optimize._lbfgsb_py (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.float32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.uint64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.uint32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.uint16 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.uint8 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.int64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.int32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), dill._objects (optional), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.int16 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.int8 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.bytes_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.str_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.void - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.object_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.datetime64 - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.timedelta64 - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.number - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.complexfloating - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.floating - imported by numpy (top-level), numpy._typing._array_like (top-level), torch._dynamo.variables.misc (optional)
|
||||
missing module named numpy.integer - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ctypeslib (top-level)
|
||||
missing module named numpy.unsignedinteger - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.bool_ - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ma.core (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.generic - imported by numpy (top-level), numpy._typing._array_like (top-level), torch._dynamo.variables.misc (optional)
|
||||
missing module named numpy.dtype - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.array_api._typing (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), numpy.ctypeslib (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy.optimize._minpack_py (top-level), dill._dill (delayed), scipy.io._netcdf (top-level), torch._dynamo.variables.misc (optional), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level)
|
||||
missing module named numpy.ndarray - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ma.core (top-level), numpy.ma.extras (top-level), numpy.lib.recfunctions (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), numpy.ctypeslib (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._mstats_basic (top-level), scipy.stats._mstats_extras (top-level), pandas.compat.numpy.function (top-level), dill._dill (delayed), scipy.io._mmio (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level)
|
||||
missing module named numpy.ufunc - imported by numpy (top-level), numpy._typing (top-level), numpy.testing.overrides (top-level), dill._dill (delayed), dill._objects (optional)
|
||||
missing module named numpy.histogramdd - imported by numpy (delayed), numpy.lib.twodim_base (delayed)
|
||||
missing module named numpy._distributor_init_local - imported by numpy (optional), numpy._distributor_init (optional)
|
||||
missing module named openvino_tokenizers - imported by openvino.tools.ovc.utils (delayed, optional)
|
||||
missing module named StringIO - imported by six (conditional)
|
||||
missing module named six.moves.zip - imported by six.moves (top-level), pasta.base.annotate (top-level)
|
||||
runtime module named six.moves - imported by dateutil.tz.tz (top-level), dateutil.tz._factories (top-level), dateutil.tz.win (top-level), dateutil.rrule (top-level), astunparse (top-level), tensorflow.python.distribute.multi_process_runner (top-level), tensorflow.python.distribute.coordinator.cluster_coordinator (top-level), six.moves.urllib (top-level), pasta.base.annotate (top-level)
|
||||
missing module named six.moves.cStringIO - imported by six.moves (top-level), astunparse (top-level)
|
||||
missing module named six.moves.range - imported by six.moves (top-level), dateutil.rrule (top-level)
|
||||
missing module named rules_python - imported by tensorflow.python.platform.resource_loader (optional)
|
||||
missing module named google.protobuf.pyext._message - imported by google.protobuf.pyext (conditional, optional), google.protobuf.internal.api_implementation (conditional, optional), google.protobuf.descriptor (conditional), google.protobuf.pyext.cpp_message (conditional)
|
||||
missing module named google.protobuf.enable_deterministic_proto_serialization - imported by google.protobuf (optional), google.protobuf.internal.api_implementation (optional)
|
||||
missing module named google.protobuf.internal._api_implementation - imported by google.protobuf.internal (optional), google.protobuf.internal.api_implementation (optional)
|
||||
missing module named astn - imported by gast.ast2 (top-level)
|
||||
missing module named theano - imported by opt_einsum.backends.theano (delayed)
|
||||
missing module named jax - imported by optree.integrations.jax (top-level), scipy._lib.array_api_compat.common._helpers (delayed), scipy._lib._array_api (delayed, conditional), opt_einsum.backends.jax (delayed, conditional), keras.src.trainers.data_adapters.data_adapter_utils (delayed), keras.src.backend.jax.core (top-level), keras.src.backend.jax.distribution_lib (top-level), keras.src.backend.jax.image (top-level), keras.src.backend.jax.linalg (top-level), keras.src.backend.jax.math (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.random (top-level), keras.src.backend.jax.rnn (top-level), keras.src.backend.jax.trainer (top-level), keras.src.backend.numpy.nn (top-level), keras.src.backend.jax.export (delayed), keras.src.backend.jax.optimizer (top-level), keras.src.ops.nn (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed), sklearn.externals.array_api_extra._lib._lazy (delayed, conditional), tensorflow.lite.python.util (optional), openvino.frontend.jax.utils (top-level), openvino.frontend.jax.jaxpr_decoder (top-level), openvino.tools.ovc.convert_impl (delayed, conditional)
|
||||
missing module named cupy - imported by scipy._lib.array_api_compat.common._helpers (delayed, conditional), scipy._lib.array_api_compat.cupy (top-level), scipy._lib.array_api_compat.cupy._aliases (top-level), scipy._lib.array_api_compat.cupy._info (top-level), scipy._lib.array_api_compat.cupy._typing (top-level), scipy._lib._array_api (delayed, conditional), opt_einsum.backends.cupy (delayed), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), sklearn.externals.array_api_compat.cupy (top-level), sklearn.externals.array_api_compat.cupy._aliases (top-level), sklearn.externals.array_api_compat.cupy._info (top-level), sklearn.externals.array_api_compat.cupy._typing (top-level), sklearn.utils._testing (delayed, conditional), sklearn.externals.array_api_compat.cupy.fft (top-level), sklearn.externals.array_api_compat.cupy.linalg (top-level)
|
||||
missing module named simplejson - imported by requests.compat (conditional, optional), huggingface_hub.utils._fixes (optional)
|
||||
missing module named dummy_threading - imported by requests.cookies (optional), joblib.compressor (optional)
|
||||
missing module named 'h2.events' - imported by urllib3.http2.connection (top-level)
|
||||
missing module named 'h2.connection' - imported by urllib3.http2.connection (top-level)
|
||||
missing module named h2 - imported by urllib3.http2.connection (top-level)
|
||||
missing module named zstandard - imported by urllib3.util.request (optional), urllib3.response (optional), fsspec.compression (optional)
|
||||
missing module named brotlicffi - imported by urllib3.util.request (optional), urllib3.response (optional), aiohttp.compression_utils (optional)
|
||||
missing module named collections.Callable - imported by collections (optional), cffi.api (optional), socks (optional)
|
||||
missing module named bcrypt - imported by cryptography.hazmat.primitives.serialization.ssh (optional)
|
||||
missing module named cryptography.x509.UnsupportedExtension - imported by cryptography.x509 (optional), urllib3.contrib.pyopenssl (optional)
|
||||
missing module named chardet - imported by requests (optional), pygments.lexer (delayed, conditional, optional)
|
||||
missing module named 'pyodide.ffi' - imported by urllib3.contrib.emscripten.fetch (delayed, optional)
|
||||
missing module named pyodide - imported by urllib3.contrib.emscripten.fetch (top-level)
|
||||
missing module named js - imported by urllib3.contrib.emscripten.fetch (top-level), fsspec.implementations.http_sync (delayed, optional)
|
||||
missing module named oauth2client - imported by tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver (optional), tensorflow.python.tpu.client.client (optional)
|
||||
missing module named googleapiclient - imported by tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver (optional), tensorflow.python.tpu.client.client (optional)
|
||||
missing module named cloud_tpu_client - imported by tensorflow.python.distribute.cluster_resolver.tpu.tpu_cluster_resolver (optional)
|
||||
missing module named kubernetes - imported by tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver (delayed, conditional, optional)
|
||||
missing module named distributed - imported by fsspec.transaction (delayed), joblib._dask (optional), joblib._parallel_backends (delayed, optional)
|
||||
missing module named 'sphinx.ext' - imported by pyarrow.vendored.docscrape (delayed, conditional)
|
||||
missing module named dateutil.tz.tzfile - imported by dateutil.tz (top-level), dateutil.zoneinfo (top-level)
|
||||
missing module named numexpr - imported by pandas.core.computation.expressions (conditional), pandas.core.computation.engines (delayed)
|
||||
missing module named pandas.core.groupby.PanelGroupBy - imported by pandas.core.groupby (delayed, optional), tqdm.std (delayed, optional)
|
||||
missing module named numba - imported by pandas.core._numba.executor (delayed, conditional), pandas.core.util.numba_ (delayed, conditional), pandas.core.window.numba_ (delayed, conditional), pandas.core.window.online (delayed, conditional), pandas.core._numba.kernels.mean_ (top-level), pandas.core._numba.kernels.shared (top-level), pandas.core._numba.kernels.sum_ (top-level), pandas.core._numba.kernels.min_max_ (top-level), pandas.core._numba.kernels.var_ (top-level), pandas.core.groupby.numba_ (delayed, conditional), pandas.core._numba.extensions (top-level)
|
||||
missing module named 'numba.extending' - imported by pandas.core._numba.kernels.sum_ (top-level)
|
||||
missing module named pandas.core.window._Rolling_and_Expanding - imported by pandas.core.window (delayed, optional), tqdm.std (delayed, optional)
|
||||
missing module named 'numba.typed' - imported by pandas.core._numba.extensions (delayed)
|
||||
missing module named 'numba.core' - imported by pandas.core._numba.extensions (top-level)
|
||||
missing module named pytest - imported by scipy._lib._testutils (delayed), sympy.testing.runtests_pytest (optional), pandas._testing._io (delayed), pandas._testing (delayed), torch.testing._internal.common_utils (delayed, conditional, optional), h5py.tests (delayed, optional), networkx.classes.backends (conditional, optional), torch.testing._internal.optests.generate_tests (delayed, conditional), sklearn.utils._testing (optional), fsspec.conftest (top-level), pyarrow.conftest (top-level), pyarrow.tests.util (top-level), torch._numpy.testing.utils (delayed)
|
||||
missing module named cupy_backends - imported by scipy._lib.array_api_compat.common._helpers (delayed)
|
||||
missing module named 'cupy.cuda' - imported by scipy._lib.array_api_compat.cupy._typing (top-level), scipy._lib.array_api_compat.common._helpers (delayed), sklearn.externals.array_api_compat.cupy._typing (top-level), sklearn.externals.array_api_compat.common._helpers (delayed)
|
||||
missing module named 'jax.experimental' - imported by scipy._lib.array_api_compat.common._helpers (delayed, conditional), keras.src.trainers.data_adapters.data_adapter_utils (delayed), keras.src.testing.test_case (delayed, conditional), keras.src.backend.jax.core (top-level), keras.src.backend.jax.distribution_lib (top-level), keras.src.backend.jax.numpy (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.sparse (top-level), keras.src.backend.jax.export (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed, conditional)
|
||||
missing module named 'jax.numpy' - imported by scipy._lib.array_api_compat.common._helpers (delayed, conditional), keras.src.backend.jax.core (top-level), keras.src.backend.jax.image (top-level), keras.src.backend.jax.linalg (top-level), keras.src.backend.jax.math (top-level), keras.src.backend.jax.numpy (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.sparse (top-level), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), openvino.frontend.jax.utils (top-level)
|
||||
missing module named 'dask.array' - imported by scipy._lib.array_api_compat.dask.array (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.common._helpers (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), sklearn.externals.array_api_compat.dask.array (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.dask.array.fft (top-level), sklearn.externals.array_api_compat.dask.array.linalg (top-level)
|
||||
missing module named sparse - imported by scipy._lib.array_api_compat.common._helpers (delayed, conditional), scipy.sparse.linalg._expm_multiply (delayed, conditional), scipy.sparse.linalg._matfuncs (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed, conditional)
|
||||
missing module named dask - imported by scipy._lib.array_api_compat.common._helpers (delayed), joblib._dask (optional), sklearn.externals.array_api_extra._lib._lazy (delayed, conditional), fsspec.implementations.dask (top-level)
missing module named ndonnx - imported by scipy._lib.array_api_compat.common._helpers (delayed), sklearn.externals.array_api_compat.common._helpers (delayed)
missing module named 'numpy.lib.array_utils' - imported by scipy._lib.array_api_compat.common._linalg (conditional), joblib._memmapping_reducer (delayed, optional), sklearn.externals.array_api_compat.common._linalg (conditional)
missing module named 'numpy.linalg._linalg' - imported by scipy._lib.array_api_compat.numpy.linalg (delayed, optional), sklearn.externals.array_api_compat.numpy.linalg (delayed, optional)
missing module named Cython - imported by scipy._lib._testutils (optional)
missing module named cython - imported by scipy._lib._testutils (optional), av.packet (top-level), av.audio.codeccontext (top-level), av.filter.loudnorm (top-level), pyarrow.conftest (optional)
missing module named sphinx - imported by scipy._lib._docscrape (delayed, conditional)
missing module named cupyx - imported by scipy._lib._array_api (delayed, conditional)
missing module named scipy.sparse.issparse - imported by scipy.sparse (top-level), scipy.sparse.linalg._interface (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.sparse.linalg._norm (top-level), scipy.sparse.csgraph._laplacian (top-level), scipy._lib._array_api (delayed), scipy.integrate._ivp.bdf (top-level), scipy.optimize._numdiff (top-level), scipy.integrate._ivp.radau (top-level), scipy.optimize._constraints (top-level), scipy.optimize._trustregion_constr.projections (top-level), scipy.optimize._lsq.least_squares (top-level), scipy.optimize._lsq.common (top-level), scipy.optimize._lsq.lsq_linear (top-level), scipy.optimize._linprog_highs (top-level), scipy.optimize._differentialevolution (top-level), scipy.optimize._milp (top-level), pandas.core.dtypes.common (delayed, conditional, optional), scipy.io.matlab._mio (delayed, conditional), scipy.io._fast_matrix_market (top-level), scipy.io._mmio (top-level), sklearn.utils._param_validation (top-level), sklearn.externals._scipy.sparse.csgraph._laplacian (top-level), sklearn.utils._set_output (top-level), sklearn.utils.multiclass (top-level), sklearn.metrics.cluster._unsupervised (top-level), sklearn.metrics.pairwise (top-level), sklearn.metrics._pairwise_distances_reduction._dispatcher (top-level), sklearn.cluster._feature_agglomeration (top-level), sklearn.cluster._bicluster (top-level), sklearn.neighbors._base (top-level), sklearn.decomposition._pca (top-level), sklearn.cluster._hdbscan.hdbscan (top-level), sklearn.cluster._optics (top-level), sklearn.manifold._isomap (top-level), sklearn.manifold._t_sne (top-level), sklearn.metrics._classification (top-level), sklearn.metrics._ranking (top-level), sklearn.utils._indexing (top-level), tensorflow.python.keras.engine.data_adapter (delayed, optional), tensorflow.python.keras.engine.training_arrays_v1 (optional), tensorflow.python.keras.engine.training_v1 (optional), sklearn.tree._classes (top-level), scipy.sparse.csgraph._validation (top-level)
missing module named scipy.linalg._fblas_64 - imported by scipy.linalg (optional), scipy.linalg.blas (optional)
missing module named scipy.linalg._cblas - imported by scipy.linalg (optional), scipy.linalg.blas (optional)
missing module named scipy.linalg._flapack_64 - imported by scipy.linalg (optional), scipy.linalg.lapack (optional)
missing module named scipy.linalg._clapack - imported by scipy.linalg (optional), scipy.linalg.lapack (optional)
missing module named scipy.special.inv_boxcox - imported by scipy.special (top-level), sklearn.preprocessing._data (top-level)
missing module named scipy.special.boxcox - imported by scipy.special (top-level), sklearn.preprocessing._data (top-level)
missing module named scipy.special.sph_jn - imported by scipy.special (delayed, conditional, optional), sympy.functions.special.bessel (delayed, conditional, optional)
missing module named scipy.special.gammaincinv - imported by scipy.special (top-level), scipy.stats._qmvnt (top-level)
missing module named scipy.special.ive - imported by scipy.special (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.special.betaln - imported by scipy.special (top-level), scipy.stats._discrete_distns (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.special.beta - imported by scipy.special (top-level), scipy.stats._tukeylambda_stats (top-level)
missing module named scipy.special.loggamma - imported by scipy.special (top-level), scipy.fft._fftlog_backend (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.interpolate.PPoly - imported by scipy.interpolate (top-level), scipy.interpolate._cubic (top-level), scipy.spatial.transform._rotation_spline (delayed), scipy.integrate._bvp (delayed)
missing module named _curses - imported by curses (top-level), curses.has_key (top-level)
missing module named olefile - imported by PIL.FpxImagePlugin (top-level), PIL.MicImagePlugin (top-level)
missing module named xmlrpclib - imported by defusedxml.xmlrpc (conditional)
missing module named railroad - imported by pyparsing.diagram (top-level)
missing module named pyparsing.Word - imported by pyparsing (delayed), pyparsing.unicode (delayed), pydot.dot_parser (top-level)
missing module named gi - imported by matplotlib.cbook (delayed, conditional)
missing module named 'scikits.umfpack' - imported by scipy.optimize._linprog_ip (optional)
missing module named 'sksparse.cholmod' - imported by scipy.optimize._linprog_ip (optional)
missing module named sksparse - imported by scipy.optimize._trustregion_constr.projections (optional), scipy.optimize._linprog_ip (optional)
missing module named scipy.optimize.root_scalar - imported by scipy.optimize (top-level), scipy.stats._continuous_distns (top-level), scipy.stats._stats_py (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.optimize.brentq - imported by scipy.optimize (delayed), scipy.integrate._ivp.ivp (delayed), scipy.stats._binomtest (top-level), scipy.stats._odds_ratio (top-level)
missing module named scipy.optimize.OptimizeResult - imported by scipy.optimize (top-level), scipy.integrate._bvp (top-level), scipy.integrate._ivp.ivp (top-level), scipy._lib.cobyqa.main (top-level), scipy._lib.cobyqa.problem (top-level), scipy.optimize._lsq.least_squares (top-level), scipy.optimize._lsq.trf (top-level), scipy.optimize._lsq.dogbox (top-level), scipy.optimize._lsq.lsq_linear (top-level), scipy.optimize._lsq.trf_linear (top-level), scipy.optimize._lsq.bvls (top-level), scipy.optimize._spectral (top-level), scipy.optimize._differentialevolution (top-level), scipy.optimize._shgo (top-level), scipy.optimize._dual_annealing (top-level), scipy.optimize._qap (top-level), scipy.optimize._direct_py (top-level)
missing module named scipy.optimize.minimize_scalar - imported by scipy.optimize (top-level), scipy.interpolate._bsplines (top-level), scipy.stats._multicomp (top-level)
missing module named scipy.special.airy - imported by scipy.special (top-level), scipy.special._orthogonal (top-level)
missing module named scipy.linalg.orthogonal_procrustes - imported by scipy.linalg (top-level), scipy.spatial._procrustes (top-level)
missing module named uarray - imported by scipy._lib.uarray (conditional, optional)
missing module named scipy.linalg.cholesky - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.optimize._optimize (top-level), scipy.optimize._minpack_py (top-level), sklearn.gaussian_process._gpc (top-level), sklearn.gaussian_process._gpr (top-level)
missing module named scipy.linalg.cho_solve - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.optimize._trustregion_exact (top-level), scipy.optimize._lsq.common (top-level), sklearn.gaussian_process._gpc (top-level), sklearn.gaussian_process._gpr (top-level)
missing module named scipy.linalg.cho_factor - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.optimize._lsq.common (top-level)
missing module named scipy.linalg.inv - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.optimize._nonlin (top-level)
missing module named scipy.linalg.lu_solve - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.integrate._ivp.bdf (top-level), scipy.integrate._ivp.radau (top-level)
missing module named scipy.linalg.lu_factor - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.integrate._ivp.bdf (top-level), scipy.integrate._ivp.radau (top-level)
missing module named scipy.linalg.eigh - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy._lib.cobyqa.models (top-level), sklearn.decomposition._kernel_pca (top-level), sklearn.manifold._locally_linear (top-level), sklearn.manifold._spectral_embedding (top-level)
missing module named scipy.linalg.eig - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level)
missing module named scipy.linalg.lstsq - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level), nncf.tensor.functions.numpy_linalg (top-level), scipy.signal._fir_filter_design (top-level), scipy.signal._savitzky_golay (top-level)
missing module named scipy.linalg.qr_insert - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level)
missing module named scipy.linalg.svd - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level), scipy.sparse.linalg._eigen._svds (top-level), scipy.linalg._decomp_polar (top-level), scipy.optimize._minpack_py (top-level), scipy.optimize._lsq.trf (top-level), scipy.optimize._nonlin (top-level), scipy.optimize._remove_redundancy (top-level), sklearn.cluster._spectral (top-level), sklearn.manifold._locally_linear (top-level)
missing module named scipy.linalg.solve - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level), scipy.interpolate._bsplines (top-level), scipy.interpolate._cubic (top-level), scipy.optimize._nonlin (top-level), scipy.optimize._linprog_rs (top-level), sklearn.gaussian_process._gpc (top-level), sklearn.manifold._locally_linear (top-level), scipy.signal._fir_filter_design (top-level)
missing module named scipy.linalg.qr - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level), scipy._lib.cobyqa.subsolvers.optim (top-level), scipy.optimize._lsq.trf (top-level), scipy.optimize._lsq.trf_linear (top-level), scipy.optimize._nonlin (top-level), sklearn.cluster._spectral (top-level), sklearn.manifold._locally_linear (top-level), scipy.signal._ltisys (top-level)
missing module named scikits - imported by scipy.sparse.linalg._dsolve.linsolve (optional)
missing module named scipy.sparse.diags - imported by scipy.sparse (delayed), scipy.sparse.linalg._special_sparse_arrays (delayed)
missing module named scipy.sparse.spdiags - imported by scipy.sparse (delayed), scipy.sparse.linalg._special_sparse_arrays (delayed)
missing module named scipy.sparse.dia_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level)
missing module named scipy.sparse.kron - imported by scipy.sparse (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level)
missing module named scipy.sparse.eye - imported by scipy.sparse (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level), scipy.integrate._ivp.bdf (top-level), scipy.integrate._ivp.radau (top-level), scipy.optimize._trustregion_constr.equality_constrained_sqp (top-level), scipy.optimize._trustregion_constr.projections (top-level), sklearn.manifold._locally_linear (top-level)
missing module named scipy.sparse.diags_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level)
missing module named scipy.sparse.eye_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level)
missing module named scipy.sparse.csc_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.optimize._milp (top-level), scipy.io._harwell_boeing.hb (top-level)
missing module named scipy.sparse.csr_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.interpolate._bsplines (top-level), scipy.interpolate._ndbspline (top-level)
missing module named scipy.sparse.SparseEfficiencyWarning - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), sklearn.cluster._optics (top-level)
missing module named scipy.stats.iqr - imported by scipy.stats (delayed), scipy.stats._hypotests (delayed)
missing module named dummy_thread - imported by cffi.lock (conditional, optional)
missing module named thread - imported by cffi.lock (conditional, optional), cffi.cparser (conditional, optional)
missing module named cStringIO - imported by cffi.ffiplatform (optional)
missing module named cPickle - imported by pycparser.ply.yacc (delayed, optional)
missing module named cffi._pycparser - imported by cffi (optional), cffi.cparser (optional)
missing module named scipy._distributor_init_local - imported by scipy (optional), scipy._distributor_init (optional)
missing module named traitlets - imported by pandas.io.formats.printing (delayed, conditional)
missing module named 'IPython.core' - imported by sympy.interactive.printing (delayed, optional), pandas.io.formats.printing (delayed, conditional), h5py (delayed, conditional, optional), h5py.ipy_completer (top-level), rich.pretty (delayed, optional)
missing module named IPython - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional, optional), pandas.io.formats.printing (delayed), h5py (delayed, conditional, optional), h5py.ipy_completer (top-level), keras.src.utils.model_visualization (delayed, conditional, optional), keras.src.saving.file_editor (delayed, optional), tensorflow.python.keras.utils.vis_utils (delayed, conditional, optional)
missing module named 'lxml.etree' - imported by openpyxl.xml (delayed, optional), openpyxl.xml.functions (conditional), pandas.io.xml (delayed), pandas.io.formats.xml (delayed), pandas.io.html (delayed), networkx.readwrite.graphml (delayed, optional)
missing module named openpyxl.tests - imported by openpyxl.reader.excel (optional)
missing module named 'odf.config' - imported by pandas.io.excel._odswriter (delayed)
missing module named 'odf.style' - imported by pandas.io.excel._odswriter (delayed)
missing module named 'odf.text' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
missing module named 'odf.table' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
missing module named 'odf.opendocument' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
missing module named xlrd - imported by pandas.io.excel._xlrd (delayed, conditional), pandas.io.excel._base (delayed, conditional)
missing module named pyxlsb - imported by pandas.io.excel._pyxlsb (delayed, conditional)
missing module named 'odf.office' - imported by pandas.io.excel._odfreader (delayed)
missing module named 'odf.element' - imported by pandas.io.excel._odfreader (delayed)
missing module named 'odf.namespaces' - imported by pandas.io.excel._odfreader (delayed)
missing module named odf - imported by pandas.io.excel._odfreader (conditional)
missing module named python_calamine - imported by pandas.io.excel._calamine (delayed, conditional)
missing module named botocore - imported by pandas.io.common (delayed, conditional, optional)
missing module named collections.Mapping - imported by collections (optional), pytz.lazy (optional)
missing module named UserDict - imported by pytz.lazy (optional)
missing module named Foundation - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named AppKit - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named PyQt4 - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named qtpy - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named 'sqlalchemy.engine' - imported by pandas.io.sql (delayed)
missing module named 'sqlalchemy.types' - imported by pandas.io.sql (delayed, conditional)
missing module named 'sqlalchemy.schema' - imported by pandas.io.sql (delayed)
missing module named 'sqlalchemy.sql' - imported by pandas.io.sql (conditional)
missing module named sqlalchemy - imported by pandas.io.sql (delayed, conditional)
missing module named pandas.core.internals.Block - imported by pandas.core.internals (conditional), pandas.io.pytables (conditional)
missing module named tables - imported by pandas.io.pytables (delayed, conditional)
missing module named lxml - imported by sympy.utilities.mathml (delayed), pandas.io.xml (conditional)
missing module named 'google.auth' - imported by pandas.io.gbq (conditional)
missing module named pandas.Panel - imported by pandas (delayed, optional), tqdm.std (delayed, optional)
missing module named 'lxml.html' - imported by pandas.io.html (delayed)
missing module named bs4 - imported by pandas.io.html (delayed)
missing module named 'pandas.api.internals' - imported by pyarrow.pandas_compat (delayed, conditional)
missing module named 'pyarrow._cuda' - imported by pyarrow.cuda (top-level)
missing module named 'pyarrow.gandiva' - imported by pyarrow.conftest (optional)
missing module named 'pyarrow._azurefs' - imported by pyarrow.fs (optional)
missing module named 'setuptools_scm.git' - imported by pyarrow (delayed, optional)
missing module named setuptools_scm - imported by matplotlib (delayed, conditional, optional), pyarrow (optional), tqdm.version (optional)
missing module named fastparquet - imported by fsspec.parquet (delayed), pyarrow.conftest (optional)
missing module named requests_kerberos - imported by fsspec.implementations.webhdfs (delayed, conditional)
missing module named smbprotocol - imported by fsspec.implementations.smb (top-level)
missing module named smbclient - imported by fsspec.implementations.smb (top-level)
missing module named paramiko - imported by fsspec.implementations.sftp (top-level)
missing module named kerchunk - imported by fsspec.implementations.reference (delayed)
missing module named ujson - imported by fsspec.implementations.cache_metadata (optional), fsspec.implementations.reference (optional)
missing module named 'libarchive.ffi' - imported by fsspec.implementations.libarchive (top-level)
missing module named libarchive - imported by fsspec.implementations.libarchive (top-level)
missing module named uvloop - imported by aiohttp.worker (delayed)
missing module named annotationlib - imported by attr._compat (conditional)
missing module named async_timeout - imported by aiohttp.helpers (conditional), aiohttp.web_ws (conditional), aiohttp.client_ws (conditional)
missing module named 'gunicorn.workers' - imported by aiohttp.worker (top-level)
missing module named gunicorn - imported by aiohttp.worker (top-level)
missing module named aiodns - imported by aiohttp.resolver (optional)
missing module named pygit2 - imported by fsspec.implementations.git (top-level)
missing module named 'distributed.worker' - imported by fsspec.implementations.dask (top-level)
missing module named 'distributed.client' - imported by fsspec.implementations.dask (top-level)
missing module named panel - imported by fsspec.gui (top-level)
missing module named fuse - imported by fsspec.fuse (top-level)
missing module named lz4 - imported by fsspec.compression (optional), joblib.compressor (optional)
missing module named snappy - imported by fsspec.compression (delayed, optional)
missing module named lzmaffi - imported by fsspec.compression (optional)
missing module named isal - imported by fsspec.compression (optional)
missing module named 'IPython.display' - imported by tqdm.notebook (conditional, optional), rich.jupyter (delayed, optional), rich.live (delayed, conditional, optional), huggingface_hub._login (delayed, optional)
missing module named 'IPython.html' - imported by tqdm.notebook (conditional, optional)
missing module named ipywidgets - imported by tqdm.notebook (conditional, optional), rich.live (delayed, conditional, optional)
missing module named boto3 - imported by tensorboard.compat.tensorflow_stub.io.gfile (optional)
missing module named 'botocore.exceptions' - imported by tensorboard.compat.tensorflow_stub.io.gfile (optional)
missing module named tensorboard.compat.notf - imported by tensorboard.compat (delayed, optional)
missing module named 'tensorflow.compat' - imported by tensorboard.util.op_evaluator (delayed), tensorboard.util.encoder (delayed), tensorboard.plugins.audio.summary (delayed), tensorboard.plugins.custom_scalar.summary (delayed), tensorboard.plugins.histogram.summary (delayed), tensorboard.plugins.image.summary (delayed), tensorboard.plugins.pr_curve.summary (delayed), tensorboard.plugins.scalar.summary (delayed), tensorboard.plugins.text.summary (delayed), keras.src.callbacks.tensorboard (delayed)
missing module named 'keras.optimizers.optimizer_v2' - imported by tensorflow.python.saved_model.load (delayed, conditional, optional)
missing module named triton - imported by torch._utils_internal (delayed, conditional), torch._dynamo.logging (conditional, optional), torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed), torch._inductor.runtime.autotune_cache (conditional), torch._inductor.runtime.coordinate_descent_tuner (optional), torch._inductor.runtime.triton_heuristics (conditional, optional), torch._inductor.codegen.wrapper (delayed, conditional), torch._inductor.kernel.mm_common (delayed), torch._inductor.kernel.mm_plus_mm (delayed), torch.sparse._triton_ops_meta (delayed, conditional), torch.sparse._triton_ops (conditional), torch._dynamo.utils (conditional), torch._inductor.compile_worker.__main__ (optional), torch._inductor.runtime.triton_helpers (top-level), torch.testing._internal.triton_utils (conditional)
missing module named 'torch._C._distributed_c10d' - imported by torch.distributed (conditional), torch.distributed.distributed_c10d (top-level), torch.distributed.constants (top-level), torch.distributed.rpc (conditional), torch.distributed.tensor._collective_utils (top-level), torch.distributed._shard.sharded_tensor.reshard (top-level), torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding_bag (top-level), torch.testing._internal.distributed.fake_pg (top-level), torch._dynamo.variables.distributed (delayed), torch.distributed._symmetric_memory (top-level), torch.distributed.elastic.control_plane (delayed), torch.testing._internal.distributed.multi_threaded_pg (top-level)
missing module named torch.randperm - imported by torch (top-level), torch.utils.data.dataset (top-level)
missing module named torch.Generator - imported by torch (top-level), torch.utils.data.dataset (top-level)
missing module named torch.default_generator - imported by torch (top-level), torch.utils.data.dataset (top-level)
missing module named soundfile - imported by torchaudio._backend.soundfile_backend (conditional, optional)
missing module named torch.norm_except_dim - imported by torch (top-level), torch.nn.utils.weight_norm (top-level)
missing module named torch._weight_norm - imported by torch (top-level), torch.nn.utils.weight_norm (top-level)
missing module named 'triton.language' - imported by torch._inductor.codegen.triton_split_scan (delayed), torch._inductor.codegen.wrapper (delayed), torch.sparse._triton_ops (conditional), torch._inductor.runtime.triton_helpers (top-level), torch.testing._internal.triton_utils (conditional)
missing module named 'triton.runtime' - imported by torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed), torch._inductor.runtime.triton_heuristics (conditional), torch._library.triton (delayed), torch._inductor.select_algorithm (delayed, optional), torch._inductor.ir (delayed), torch._dynamo.variables.builder (delayed, conditional), torch._inductor.fx_passes.reinplace (delayed, conditional), torch._inductor.utils (delayed)
missing module named 'triton.compiler' - imported by torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed, optional), torch._inductor.runtime.hints (optional), torch._inductor.runtime.triton_heuristics (conditional, optional), torch._inductor.scheduler (delayed), torch._inductor.codegen.triton (delayed), torch._inductor.codecache (delayed, optional), torch._inductor.async_compile (delayed, optional)
missing module named dl - imported by setuptools.command.build_ext (conditional, optional)
missing module named 'Cython.Distutils' - imported by setuptools.command.build_ext (conditional, optional)
missing module named 'win32com.shell' - imported by torch._appdirs (conditional, optional)
missing module named 'com.sun' - imported by torch._appdirs (delayed, conditional, optional)
missing module named com - imported by torch._appdirs (delayed)
missing module named win32api - imported by torch._appdirs (delayed, conditional, optional)
missing module named win32com - imported by torch._appdirs (delayed)
missing module named halide - imported by torch._inductor.codecache (delayed, conditional), torch._inductor.runtime.halide_helpers (optional)
missing module named gmpy2.qdiv - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.lcm - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.gcd - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.gcdext - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.denom - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.numer - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.mpq - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.mpz - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named 'pyglet.image' - imported by sympy.printing.preview (delayed, optional)
missing module named 'pyglet.window' - imported by sympy.plotting.pygletplot.managed_window (top-level), sympy.plotting.pygletplot.plot_controller (top-level), sympy.printing.preview (delayed, optional)
missing module named pyglet - imported by sympy.plotting.pygletplot.plot (optional), sympy.plotting.pygletplot.plot_axes (top-level), sympy.printing.preview (delayed, conditional, optional), sympy.testing.runtests (delayed, conditional)
missing module named 'pyglet.gl' - imported by sympy.plotting.pygletplot.plot_axes (top-level), sympy.plotting.pygletplot.util (top-level), sympy.plotting.pygletplot.plot_window (top-level), sympy.plotting.pygletplot.plot_camera (top-level), sympy.plotting.pygletplot.plot_rotation (top-level), sympy.plotting.pygletplot.plot_curve (top-level), sympy.plotting.pygletplot.plot_mode_base (top-level), sympy.plotting.pygletplot.plot_surface (top-level)
missing module named 'pyglet.clock' - imported by sympy.plotting.pygletplot.managed_window (top-level)
missing module named 'sage.libs' - imported by mpmath.libmp.backend (conditional, optional), mpmath.libmp.libelefun (conditional, optional), mpmath.libmp.libmpf (conditional, optional), mpmath.libmp.libmpc (conditional, optional), mpmath.libmp.libhyper (delayed, conditional), mpmath.ctx_mp (conditional)
missing module named sage - imported by mpmath.libmp.backend (conditional, optional)
missing module named gmpy - imported by mpmath.libmp.backend (conditional, optional)
missing module named pysat - imported by sympy.logic.algorithms.minisat22_wrapper (delayed)
missing module named pycosat - imported by sympy.logic.algorithms.pycosat_wrapper (delayed)
missing module named flint - imported by sympy.external.gmpy (delayed, optional), sympy.polys.polyutils (conditional), sympy.polys.factortools (conditional), sympy.polys.polyclasses (conditional), sympy.polys.domains.groundtypes (conditional), sympy.polys.domains.finitefield (conditional)
missing module named all - imported by sympy.testing.runtests (delayed, optional)
missing module named 'IPython.Shell' - imported by sympy.interactive.session (delayed, conditional)
missing module named 'IPython.frontend' - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional)
missing module named 'IPython.terminal' - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional)
missing module named 'IPython.iplib' - imported by sympy.interactive.printing (delayed, optional)
missing module named py - imported by mpmath.tests.runtests (delayed, conditional)
missing module named 'sage.all' - imported by sympy.core.function (delayed)
missing module named 'sage.interfaces' - imported by sympy.core.basic (delayed)
missing module named 'cutlass_library.gemm_operation' - imported by torch._inductor.codegen.cuda.gemm_template (delayed), torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions (conditional)
missing module named 'cutlass_library.library' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional), torch._inductor.codegen.cuda.gemm_template (delayed), torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions (conditional)
missing module named 'cutlass_library.generator' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed)
missing module named 'cutlass_library.manifest' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional)
missing module named cutlass_library - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional)
missing module named torch.multiprocessing._prctl_pr_set_pdeathsig - imported by torch.multiprocessing (top-level), torch.multiprocessing.spawn (top-level)
missing module named 'torch.utils._config_typing' - imported by torch._dynamo.config (conditional), torch._inductor.config (conditional), torch._functorch.config (conditional)
missing module named 'torch._C._functorch' - imported by torch._subclasses.fake_tensor (top-level), torch._subclasses.meta_utils (top-level), torch._functorch.pyfunctorch (top-level), torch._higher_order_ops.cond (top-level), torch._functorch.autograd_function (top-level), torch._functorch.utils (top-level), torch._functorch.vmap (top-level), torch._functorch.eager_transforms (top-level)
missing module named torch.trunc - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.tanh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.tan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.square - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sqrt - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sinh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sin - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.signbit - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sign - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.round - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.reciprocal - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.rad2deg - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.negative - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.logical_not - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log2 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log1p - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log10 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.isnan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.isinf - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.isfinite - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.floor - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.expm1 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.exp2 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.exp - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.deg2rad - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.cosh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.cos - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.conj_physical - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.ceil - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.bitwise_not - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arctanh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arctan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arcsinh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arcsin - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arccosh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arccos - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.absolute - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.true_divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.subtract - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.remainder - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.pow - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.not_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.nextafter - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.multiply - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.minimum - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.maximum - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.logical_xor - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.logical_or - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.logical_and - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.logaddexp2 - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.logaddexp - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.less_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.less - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.ldexp - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.lcm - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.hypot - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.heaviside - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.greater_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.greater - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.gcd - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.fmod - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.fmin - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.fmax - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.floor_divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.float_power - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.eq - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.copysign - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.bitwise_xor - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.bitwise_right_shift - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.bitwise_or - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.bitwise_left_shift - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.bitwise_and - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.arctan2 - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.add - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch_xla - imported by torch._functorch.fx_minifier (delayed), huggingface_hub.serialization._torch (delayed, conditional)
missing module named deeplearning - imported by torch._inductor.fx_passes.group_batch_fusion (optional)
missing module named torch._inductor.fx_passes.fb - imported by torch._inductor.fx_passes (delayed, conditional), torch._inductor.fx_passes.pre_grad (delayed, conditional)
missing module named 'torch_xla.distributed' - imported by torch.distributed.tensor._api (delayed, conditional, optional)
missing module named torchdistx - imported by torch.distributed.fsdp._init_utils (optional)
missing module named 'torch._C._distributed_rpc' - imported by torch.distributed.rpc (conditional), torch.distributed.rpc.api (top-level), torch.distributed.rpc.constants (top-level), torch.distributed.rpc.internal (top-level), torch.distributed.rpc.options (top-level), torch._jit_internal (conditional)
missing module named foo - imported by torch._functorch.compilers (delayed)
missing module named torch.broadcast_shapes - imported by torch (top-level), torch._numpy._funcs_impl (top-level)
missing module named torch._numpy.float_ - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.max - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.isnan - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.signbit - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.real - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.isscalar - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.iscomplexobj - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.imag - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.intp - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
missing module named torch._numpy.empty - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
missing module named torch._numpy.arange - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
missing module named 'onnxscript._framework_apis' - imported by torch.onnx._internal._exporter_legacy (delayed), torch.onnx._internal._lazy_import (conditional)
missing module named onnxscript - imported by torch.onnx._internal.fx.registration (conditional), torch.onnx._internal._exporter_legacy (delayed, conditional, optional), torch.onnx._internal.fx.diagnostics (top-level), torch.onnx._internal.fx.onnxfunction_dispatcher (conditional), torch.onnx._internal.fx.fx_onnx_interpreter (top-level), torch.onnx._internal.onnxruntime (delayed, conditional, optional), torch.onnx._internal._lazy_import (conditional), torch.onnx._internal.exporter._core (top-level), torch.onnx._internal.exporter._dispatching (top-level), torch.onnx._internal.exporter._schemas (top-level), torch.onnx._internal.exporter._registration (conditional), torch.onnx._internal.exporter._building (top-level), torch.onnx._internal.exporter._tensors (top-level), torch.onnx._internal.exporter._ir_passes (top-level), torch.onnx._internal.exporter._reporting (conditional)
missing module named 'onnx.onnx_cpp2py_export.defs' - imported by onnx.defs (top-level), onnx.reference.ops._op_list (top-level)
missing module named 'onnx.onnx_cpp2py_export.version_converter' - imported by onnx.version_converter (top-level)
missing module named 'onnx.onnx_cpp2py_export.shape_inference' - imported by onnx.shape_inference (top-level)
missing module named 'onnx.onnx_cpp2py_export.printer' - imported by onnx.printer (top-level)
missing module named 'onnx.onnx_cpp2py_export.parser' - imported by onnx.parser (top-level)
missing module named 'onnx.onnx_cpp2py_export.checker' - imported by onnx.checker (top-level)
missing module named pyinstrument - imported by torch.onnx._internal.exporter._core (delayed, conditional)
missing module named 'onnxscript.ir' - imported by torch.onnx._internal.exporter._core (top-level), torch.onnx._internal.exporter._building (top-level)
missing module named 'onnxscript.evaluator' - imported by torch.onnx._internal.exporter._core (top-level)
missing module named onnxruntime.capi.build_and_package_info - imported by onnxruntime.capi.onnxruntime_validation (delayed, conditional, optional)
missing module named 'onnxruntime.training' - imported by onnxruntime.capi.onnxruntime_validation (delayed, optional)
missing module named 'onnxscript.function_libs' - imported by torch.onnx._internal.fx.diagnostics (top-level), torch.onnx._internal.fx.onnxfunction_dispatcher (conditional), torch.onnx._internal.fx.decomposition_skip (top-level), torch.onnx._internal.fx.fx_onnx_interpreter (top-level), torch.onnx._internal.exporter._ir_passes (delayed, optional)
missing module named 'onnx.defs.OpSchema' - imported by torch.onnx._internal.fx.type_utils (conditional)
missing module named transformers - imported by torch._dynamo.variables.dicts (delayed), torch.onnx._internal.fx.patcher (delayed, conditional, optional), torch.onnx._internal.fx.dynamo_graph_extractor (delayed, optional), nncf.data.generators (delayed, optional), torch.testing._internal.common_distributed (delayed, optional)
missing module named accimage - imported by torchvision.transforms.transforms (optional), torchvision.transforms.functional (optional), torchvision.transforms._functional_pil (optional), torchvision.datasets.folder (delayed)
missing module named torch.ao.quantization.QuantStub - imported by torch.ao.quantization (top-level), torchvision.models.quantization.mobilenetv2 (top-level), torchvision.models.quantization.mobilenetv3 (top-level), torch.testing._internal.common_quantization (top-level)
missing module named torch.ao.quantization.DeQuantStub - imported by torch.ao.quantization (top-level), torchvision.models.quantization.mobilenetv2 (top-level), torchvision.models.quantization.mobilenetv3 (top-level), torch.testing._internal.common_quantization (top-level)
missing module named 'monkeytype.tracing' - imported by torch.jit._monkeytype_config (optional)
missing module named 'monkeytype.db' - imported by torch.jit._monkeytype_config (optional)
missing module named 'monkeytype.config' - imported by torch.jit._monkeytype_config (optional)
missing module named monkeytype - imported by torch.jit._monkeytype_config (optional)
missing module named 'torch._C._jit_tree_views' - imported by torch._sources (top-level), torch.jit.frontend (top-level)
missing module named wcwidth - imported by tabulate (optional)
missing module named torch.ao.quantization.QConfig - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.qconfig_mapping_utils (top-level), torch.ao.quantization.fx.lstm_utils (top-level), torch.testing._internal.common_quantization (top-level)
missing module named torch.ao.quantization.QConfigMapping - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.custom_config (top-level), torch.ao.ns.fx.n_shadows_utils (top-level), torch.ao.ns.fx.qconfig_multi_mapping (top-level), torch.ao.ns._numeric_suite_fx (top-level), torch.ao.quantization.fx.lstm_utils (top-level), torch.ao.quantization.pt2e.prepare (top-level), torch.testing._internal.common_quantization (top-level)
missing module named torch.ao.quantization.QuantType - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.utils (top-level), torch.testing._internal.common_quantization (top-level)
missing module named torch.ao.quantization.QConfigAny - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.utils (top-level)
missing module named torch.ao.quantization.float_qparams_weight_only_qconfig - imported by torch.ao.quantization (delayed, conditional), torch.ao.nn.quantized.modules.embedding_ops (delayed, conditional), torch.testing._internal.common_quantization (top-level)
missing module named pycocotools - imported by torchvision.datasets.coco (delayed), torchvision.tv_tensors._dataset_wrapper (delayed)
missing module named gdown - imported by torchvision.datasets.utils (delayed, optional)
missing module named 'IPython.utils' - imported by h5py.ipy_completer (top-level)
missing module named mpi4py - imported by h5py._hl.files (delayed)
missing module named lmdb - imported by torchvision.datasets.lsun (delayed)
missing module named 'onnxscript.rewriter' - imported by torch.onnx._internal.onnxruntime (delayed, conditional, optional)
missing module named 'torch._C._onnx' - imported by torch.onnx (top-level), torch.onnx.utils (top-level), torch.onnx.symbolic_helper (top-level), torch.onnx._globals (top-level), torch.onnx.symbolic_opset9 (top-level), torch.onnx.symbolic_opset10 (top-level), torch.onnx.symbolic_opset13 (top-level), torch.onnx._experimental (top-level), torch.onnx.verification (top-level)
missing module named torchrec - imported by torch._dynamo.variables.user_defined (delayed)
missing module named 'torch._C._lazy_ts_backend' - imported by torch._lazy.ts_backend (top-level), torch._lazy.computation (top-level)
missing module named 'torch._C._lazy' - imported by torch._lazy (top-level), torch._lazy.device_context (top-level), torch._lazy.metrics (top-level), torch._lazy.computation (top-level), torch._lazy.config (top-level), torch._lazy.debug (top-level), torch._lazy.ir_cache (top-level)
missing module named hypothesis - imported by torch.testing._internal.common_utils (optional), torch.testing._internal.hypothesis_utils (top-level)
missing module named 'numba.cuda' - imported by torch.testing._internal.common_cuda (conditional, optional)
missing module named 'xmlrunner.result' - imported by torch.testing._internal.common_utils (delayed, conditional)
missing module named xmlrunner - imported by torch.testing._internal.common_utils (delayed, conditional)
missing module named expecttest - imported by torch.testing._internal.common_utils (top-level)
missing module named '_pytest.recwarn' - imported by torch._dynamo.variables.user_defined (delayed, optional)
missing module named _pytest - imported by torch._dynamo.variables.user_defined (delayed, optional)
missing module named 'torch._C._dynamo' - imported by torch._guards (top-level), torch._dynamo.convert_frame (top-level), torch._dynamo.guards (top-level), torch._dynamo.eval_frame (top-level), torch._dynamo.decorators (conditional), torch._dynamo.types (top-level)
missing module named pygraphviz - imported by networkx.drawing.nx_agraph (delayed, optional)
missing module named 'triton.backends' - imported by torch._inductor.runtime.triton_heuristics (conditional, optional)
missing module named 'triton.testing' - imported by torch._inductor.runtime.benchmarking (delayed, optional), torch._inductor.utils (delayed)
missing module named 'torch_xla.core' - imported by torch._dynamo.testing (delayed, conditional), huggingface_hub.serialization._torch (delayed, conditional, optional), torch._dynamo.backends.torchxla (delayed, optional)
missing module named torch.float16 - imported by torch (delayed, conditional), torch._inductor.codegen.cpp_wrapper_cuda (delayed, conditional)
missing module named torch.bfloat16 - imported by torch (delayed, conditional), torch._inductor.codegen.cpp_wrapper_cuda (delayed, conditional)
missing module named torch.ScriptObject - imported by torch (delayed), torch.export.graph_signature (delayed)
missing module named moviepy - imported by torch.utils.tensorboard.summary (delayed, optional)
missing module named 'torch._C._monitor' - imported by torch.monitor (top-level)
missing module named 'libfb.py' - imported by torch._dynamo.debug_utils (conditional), torch._inductor.codecache (delayed, conditional), torch._inductor.compile_worker.subproc_pool (delayed, conditional)
missing module named 'torch._inductor.fb' - imported by torch._inductor.runtime.autotune_cache (delayed, conditional, optional), torch._inductor.cpp_builder (conditional), torch._inductor.graph (conditional), torch._inductor.codecache (delayed, conditional, optional), torch._inductor.compile_fx (delayed, conditional, optional)
missing module named 'triton.fb' - imported by torch._inductor.cpp_builder (conditional), torch._inductor.codecache (conditional)
missing module named rfe - imported by torch._inductor.remote_cache (conditional)
missing module named redis - imported by torch._inductor.remote_cache (optional)
missing module named 'ck4inductor.universal_gemm' - imported by torch._inductor.utils (delayed, optional)
missing module named ck4inductor - imported by torch._inductor.utils (delayed, optional)
missing module named libfb - imported by torch._inductor.config (conditional, optional)
missing module named amdsmi - imported by torch.cuda (conditional, optional), torch.cuda.memory (delayed, conditional, optional)
missing module named pynvml - imported by torch.cuda (delayed, conditional, optional), torch.cuda.memory (delayed, conditional, optional)
missing module named torch.device - imported by torch (top-level), torch.types (top-level), torch.nn.modules.module (top-level), torch.cuda (top-level), torch.xpu (top-level), torch._inductor.graph (top-level), torch.distributed.nn.api.remote_module (top-level), torch._library.infer_schema (top-level), torch.cpu (top-level), torch.mtia (top-level)
missing module named 'torch._C._profiler' - imported by torch.utils._traceback (delayed), torch.profiler (top-level), torch.autograd.profiler (top-level), torch.profiler.profiler (top-level), torch.profiler._memory_profiler (top-level), torch.cuda._memory_viz (delayed), torch.testing._internal.logging_tensor (top-level), torch.autograd (top-level), torch.profiler._pattern_matcher (top-level)
missing module named 'torch._C._autograd' - imported by torch._subclasses.meta_utils (top-level), torch.profiler (top-level), torch.profiler._memory_profiler (top-level), torch.autograd (top-level)
missing module named z3 - imported by torch.fx.experimental.validator (optional), torch.fx.experimental.migrate_gradual_types.transform_to_z3 (optional), torch.fx.experimental.migrate_gradual_types.z3_types (optional)
missing module named torch.Size - imported by torch (top-level), torch.types (top-level), torch.nn.modules.normalization (top-level)
missing module named torch.nn.Sequential - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
missing module named torch.nn.ParameterList - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ParameterDict - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ModuleList - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ModuleDict - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ReLU - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Linear - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Conv3d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Conv2d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Conv1d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.BatchNorm3d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.BatchNorm2d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.BatchNorm1d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Module - imported by torch.nn (top-level), torch.optim.swa_utils (top-level), torch.ao.quantization.fake_quantize (top-level), torch.jit._recursive (top-level), torch.jit._script (top-level), torch.jit._trace (top-level), torch._dynamo.mutation_guard (top-level), torch.fx.passes.utils.common (top-level), torch.distributed.nn.api.remote_module (top-level), torchaudio.models.wav2vec2.utils.import_fairseq (top-level), torchaudio.models.wav2vec2.model (top-level), torchaudio.models.wav2vec2.components (top-level), torchaudio.models.wav2vec2.utils.import_huggingface (top-level), torchaudio.pipelines._wav2vec2.impl (top-level), torch.fx.experimental.proxy_tensor (top-level), nncf.torch.utils (top-level), nncf.torch.debug (top-level), nncf.common.factory (delayed, conditional), nncf.torch.model_creation (top-level)
|
||||
missing module named torch.qscheme - imported by torch (top-level), torch.types (top-level)
|
||||
missing module named torch.layout - imported by torch (top-level), torch.types (top-level)
|
||||
missing module named torch.DispatchKey - imported by torch (top-level), torch.types (top-level)
|
||||
missing module named torchaudio._internal.fb - imported by torchaudio._internal (optional)
|
||||
missing module named sentencepiece - imported by torchaudio.pipelines.rnnt_pipeline (delayed)
|
||||
missing module named dp - imported by torchaudio.pipelines._tts.utils (delayed)
|
||||
missing module named kaldi_io - imported by torchaudio.kaldi_io (delayed)
|
||||
missing module named av.video._VideoCodecName - imported by av.video (top-level), av.codec.context (top-level), av.container.output (top-level)
|
||||
missing module named 'cython.cimports' - imported by av.packet (top-level), av.audio.codeccontext (top-level), av.filter.loudnorm (top-level)
|
||||
missing module named av.audio._AudioCodecName - imported by av.audio (top-level), av.codec.context (top-level), av.container.output (top-level)
|
||||
missing module named torcharrow - imported by torch.utils.data.datapipes.iter.callable (delayed, conditional, optional)
|
||||
missing module named _dbm - imported by dbm.ndbm (top-level)
|
||||
missing module named _gdbm - imported by dbm.gnu (top-level)
|
||||
missing module named diff - imported by dill._dill (delayed, conditional, optional)
|
||||
missing module named dill.diff - imported by dill (delayed, conditional, optional), dill._dill (delayed, conditional, optional)
|
||||
missing module named version - imported by dill (optional)
|
||||
missing module named 'jax.typing' - imported by optree.integrations.jax (top-level)
|
||||
missing module named 'jax._src' - imported by optree.integrations.jax (top-level), keras.src.backend.jax.nn (delayed, optional)
|
||||
missing module named 'torch._C._distributed_autograd' - imported by torch.distributed.autograd (conditional)
|
||||
missing module named 'einops._torch_specific' - imported by torch._dynamo.decorators (delayed, optional)
|
||||
missing module named einops - imported by torch._dynamo.decorators (delayed)
|
||||
missing module named keras.src.backend.random_seed_dtype - imported by keras.src.backend (delayed), keras.src.random.seed_generator (delayed)
|
||||
missing module named keras.src.backend.convert_to_tensor - imported by keras.src.backend (delayed), keras.src.random.seed_generator (delayed)
|
||||
missing module named 'openvino._pyopenvino.util' - imported by openvino.utils (delayed), openvino.runtime.utils (top-level)
|
||||
missing module named 'openvino._pyopenvino.op' - imported by openvino.runtime.op (top-level), openvino.runtime.op.util (top-level), nncf.openvino.optimized_functions.models (top-level)
|
||||
missing module named 'jax.nn' - imported by keras.src.backend.jax.nn (delayed, optional)
|
||||
missing module named 'jax.scipy' - imported by keras.src.backend.jax.linalg (top-level)
|
||||
missing module named 'tensorflow.experimental' - imported by keras.src.backend.tensorflow.distribution_lib (top-level)
|
||||
missing module named pygments.lexers.PrologLexer - imported by pygments.lexers (top-level), pygments.lexers.cplint (top-level)
|
||||
missing module named ctags - imported by pygments.formatters.html (optional)
|
||||
missing module named linkify_it - imported by markdown_it.main (optional)
|
||||
missing module named 'tensorflow.saved_model' - imported by keras.src.export.saved_model (delayed)
|
||||
missing module named 'tensorflow.summary' - imported by keras.src.callbacks.tensorboard (delayed, conditional)
|
||||
missing module named pydantic - imported by huggingface_hub.utils._runtime (delayed, optional), huggingface_hub._webhooks_payload (conditional)
|
||||
missing module named 'google.colab' - imported by huggingface_hub.utils._auth (delayed, optional)
|
||||
missing module named hf_transfer - imported by huggingface_hub.file_download (delayed, conditional, optional), huggingface_hub.lfs (delayed, optional)
|
||||
missing module named hf_xet - imported by huggingface_hub.file_download (delayed, optional), huggingface_hub._commit_api (delayed)
|
||||
missing module named 'mcp.client' - imported by huggingface_hub.inference._mcp.mcp_client (delayed, conditional)
|
||||
missing module named mcp - imported by huggingface_hub.inference._mcp.utils (conditional), huggingface_hub.inference._mcp.mcp_client (delayed, conditional)
|
||||
missing module named fastai - imported by huggingface_hub.fastai_utils (delayed)
|
||||
missing module named 'fastapi.responses' - imported by huggingface_hub._oauth (delayed, optional), huggingface_hub._webhooks_server (conditional)
|
||||
missing module named fastapi - imported by huggingface_hub._oauth (delayed, conditional, optional), huggingface_hub._webhooks_server (conditional)
|
||||
missing module named gradio - imported by huggingface_hub._webhooks_server (delayed, conditional)
|
||||
missing module named tensorboardX - imported by huggingface_hub._tensorboard_logger (conditional, optional)
|
||||
missing module named 'starlette.datastructures' - imported by huggingface_hub._oauth (delayed, optional)
|
||||
missing module named 'authlib.integrations' - imported by huggingface_hub._oauth (delayed, optional)
|
||||
missing module named authlib - imported by huggingface_hub._oauth (delayed, optional)
|
||||
missing module named starlette - imported by huggingface_hub._oauth (delayed, optional)
|
||||
missing module named 'ipywidgets.widgets' - imported by huggingface_hub._login (delayed, optional)
|
||||
missing module named 'InquirerPy.separator' - imported by huggingface_hub.commands.delete_cache (optional)
|
||||
missing module named 'InquirerPy.base' - imported by huggingface_hub.commands.delete_cache (optional)
|
||||
missing module named InquirerPy - imported by huggingface_hub.commands.delete_cache (optional)
|
||||
missing module named pydotplus - imported by keras.src.utils.model_visualization (optional), tensorflow.python.keras.utils.vis_utils (optional)
|
||||
missing module named pydot_ng - imported by keras.src.utils.model_visualization (optional), tensorflow.python.keras.utils.vis_utils (optional)
|
||||
missing module named keras.src.ops.convert_to_tensor - imported by keras.src.ops (top-level), keras.src.utils.torch_utils (top-level)
|
||||
missing module named keras.src.ops.convert_to_numpy - imported by keras.src.ops (top-level), keras.src.utils.torch_utils (top-level)
|
||||
missing module named keras.src.backend.random - imported by keras.src.backend (top-level), keras.src.ops (top-level), keras.src.testing.test_case (delayed), keras.src.initializers.random_initializers (top-level)
|
||||
missing module named keras.src.backend.is_tensor - imported by keras.src.backend (top-level), keras.src.ops (top-level)
|
||||
missing module named keras.src.backend.cond - imported by keras.src.backend (top-level), keras.src.ops (top-level)
|
||||
missing module named keras.src.backend.cast - imported by keras.src.backend (top-level), keras.src.ops (top-level)
|
||||
missing module named keras.src.engine - imported by keras.src (conditional), nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named keras.engine - imported by keras (conditional), nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named flax - imported by keras.src.utils.jax_layer (delayed)
|
||||
missing module named array_api_strict - imported by sklearn.utils._array_api (delayed, conditional, optional)
|
||||
missing module named sklearn.externals.array_api_compat.common.array_namespace - imported by sklearn.externals.array_api_compat.common (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named 'cupy_backends.cuda' - imported by sklearn.externals.array_api_compat.common._helpers (delayed)
|
||||
missing module named torch.outer - imported by torch (top-level), sklearn.externals.array_api_compat.torch.linalg (top-level)
|
||||
missing module named 'cupy.linalg' - imported by sklearn.externals.array_api_compat.cupy.linalg (top-level)
|
||||
missing module named 'cupy.fft' - imported by sklearn.externals.array_api_compat.cupy.fft (top-level)
|
||||
missing module named array_api_compat - imported by sklearn.externals.array_api_extra._lib._utils._compat (optional)
|
||||
missing module named 'numpydoc.docscrape' - imported by sklearn.utils._testing (delayed)
|
||||
missing module named numpydoc - imported by sklearn.utils._testing (delayed, optional)
|
||||
missing module named 'distributed.utils' - imported by joblib._dask (conditional, optional)
|
||||
missing module named 'dask.utils' - imported by joblib._dask (conditional)
|
||||
missing module named 'dask.sizeof' - imported by joblib._dask (conditional)
|
||||
missing module named 'dask.distributed' - imported by joblib._dask (conditional)
|
||||
missing module named viztracer - imported by joblib.externals.loky.initializers (delayed, optional)
|
||||
missing module named 'lz4.frame' - imported by joblib.compressor (optional)
|
||||
missing module named pyamg - imported by sklearn.manifold._spectral_embedding (delayed, conditional, optional)
|
||||
missing module named 'tf_keras.optimizers' - imported by tensorflow.python.saved_model.load (delayed, conditional, optional)
|
||||
missing module named tf_keras - imported by tensorflow.python.util.lazy_loader (delayed, conditional, optional), huggingface_hub.keras_mixin (conditional, optional), tensorflow.python.saved_model.load (delayed, conditional, optional)
|
||||
missing module named objgraph - imported by tensorflow.python.distribute.test_util (optional)
|
||||
missing module named tblib - imported by tensorflow.python.distribute.multi_process_runner (optional)
|
||||
missing module named tensorflow.python.framework.fast_tensor_util - imported by tensorflow.python.framework (optional), tensorflow.python.framework.tensor_util (optional)
|
||||
missing module named portpicker - imported by tensorflow.python.framework.test_util (delayed), tensorflow.dtensor.python.tests.multi_client_test_util (top-level), tensorflow.python.debug.lib.grpc_debug_test_server (top-level)
|
||||
missing module named 'tensorflow.python.framework.is_mlir_bridge_test_true' - imported by tensorflow.python.framework.test_util (optional)
|
||||
missing module named 'tensorflow.python.framework.is_mlir_bridge_test_false' - imported by tensorflow.python.framework.test_util (optional)
|
||||
missing module named 'tensorflow.python.framework.is_xla_test_true' - imported by tensorflow.python.framework.test_util (optional)
|
||||
missing module named 'six.moves.urllib.request' - imported by tensorflow.python.keras.utils.data_utils (top-level)
|
||||
missing module named tensorflow.python.keras.__version__ - imported by tensorflow.python.keras (delayed), tensorflow.python.keras.saving.saving_utils (delayed), tensorflow.python.keras.saving.hdf5_format (delayed), tensorflow.python.keras.engine.training (delayed)
|
||||
missing module named tensorflow.python.keras.layers.wrappers - imported by tensorflow.python.keras.layers (delayed), tensorflow.python.keras.utils.vis_utils (delayed)
|
||||
missing module named 'tensorflow.python.training.tracking' - imported by openvino.frontend.tensorflow.utils (delayed, optional)
|
||||
missing module named paddle - imported by openvino.tools.ovc.moc_frontend.shape_utils (delayed, conditional), openvino.tools.ovc.moc_frontend.type_utils (delayed, conditional), openvino.tools.ovc.moc_frontend.paddle_frontend_utils (delayed, optional), openvino.tools.ovc.convert_impl (delayed, conditional)
|
||||
missing module named 'conda.cli' - imported by torch.utils.benchmark.examples.blas_compare_setup (optional)
|
||||
missing module named conda - imported by torch.utils.benchmark.examples.blas_compare_setup (optional)
|
||||
missing module named 'hypothesis.strategies' - imported by torch.testing._internal.hypothesis_utils (top-level)
|
||||
missing module named 'hypothesis.extra' - imported by torch.testing._internal.hypothesis_utils (top-level)
|
||||
missing module named torch.tensor - imported by torch (top-level), torch.utils.benchmark.utils.compare (top-level)
|
||||
missing module named torch.TensorType - imported by torch (top-level), torch.jit._passes._property_propagation (top-level)
|
||||
missing module named 'torch._C._distributed_rpc_testing' - imported by torch.distributed.rpc._testing (conditional)
|
||||
missing module named etcd - imported by torch.distributed.elastic.rendezvous.etcd_rendezvous (top-level), torch.distributed.elastic.rendezvous.etcd_store (top-level), torch.distributed.elastic.rendezvous.etcd_rendezvous_backend (top-level), torch.distributed.elastic.rendezvous.etcd_server (optional)
|
||||
missing module named 'torch.distributed.elastic.metrics.static_init' - imported by torch.distributed.elastic.metrics (optional)
|
||||
missing module named 'coremltools.models' - imported by torch.backends._coreml.preprocess (top-level)
|
||||
missing module named 'coremltools.converters' - imported by torch.backends._coreml.preprocess (top-level)
|
||||
missing module named coremltools - imported by torch.backends._coreml.preprocess (top-level)
|
||||
missing module named pytorch_lightning - imported by torch.ao.pruning._experimental.data_sparsifier.lightning.callbacks.data_sparsity (top-level)
|
||||
missing module named fbscribelogger - imported by torch._logging.scribe (optional)
|
||||
missing module named 'tvm.contrib' - imported by torch._dynamo.backends.tvm (delayed)
|
||||
missing module named tvm - imported by torch._dynamo.backends.tvm (delayed, conditional)
|
||||
missing module named 'torch._C._VariableFunctions' - imported by torch (conditional)
|
||||
missing module named 'tensorflow.contrib' - imported by tensorflow.python.tools.import_pb_to_tensorboard (optional)
|
||||
missing module named memory_profiler - imported by tensorflow.python.eager.memory_tests.memory_test_util (optional)
|
||||
missing module named six.moves.urllib.request - imported by six.moves.urllib (top-level), tensorflow.python.distribute.failure_handling.failure_handling_util (top-level)
|
||||
missing module named grpc_reflection - imported by grpc (optional)
|
||||
missing module named grpc_health - imported by grpc (optional)
|
||||
missing module named grpc_tools - imported by grpc._runtime_protos (delayed, optional), grpc (optional)
|
||||
missing module named 'grpc_tools.protoc' - imported by grpc._runtime_protos (delayed, conditional)
|
||||
missing module named tflite_runtime - imported by tensorflow.lite.python.metrics.metrics (conditional), tensorflow.lite.python.interpreter (conditional), tensorflow.lite.python.analyzer (conditional), tensorflow.lite.tools.visualize (conditional)
|
||||
missing module named awq - imported by openvino.frontend.pytorch.quantized (delayed, conditional, optional)
|
||||
missing module named 'transformers.pytorch_utils' - imported by openvino.frontend.pytorch.patch_model (delayed, optional)
|
||||
missing module named 'jax.lax' - imported by openvino.frontend.jax.passes (top-level)
|
||||
missing module named 'jax.core' - imported by openvino.frontend.jax.jaxpr_decoder (top-level)
|
||||
missing module named 'keras.src.utils.control_flow_util' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.src.engine.keras_tensor' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.utils.control_flow_util' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.engine.keras_tensor' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named rpds.List - imported by rpds (top-level), referencing._core (top-level)
|
||||
missing module named rpds.HashTrieSet - imported by rpds (top-level), referencing._core (top-level)
|
||||
missing module named rpds.HashTrieMap - imported by rpds (top-level), referencing._core (top-level), jsonschema._types (top-level), jsonschema.validators (top-level)
|
||||
missing module named isoduration - imported by jsonschema._format (top-level)
|
||||
missing module named uri_template - imported by jsonschema._format (top-level)
|
||||
missing module named jsonpointer - imported by jsonschema._format (top-level)
|
||||
missing module named webcolors - imported by jsonschema._format (top-level)
|
||||
missing module named rfc3339_validator - imported by jsonschema._format (top-level)
|
||||
missing module named rfc3986_validator - imported by jsonschema._format (optional)
|
||||
missing module named rfc3987 - imported by jsonschema._format (optional)
|
||||
missing module named fqdn - imported by jsonschema._format (top-level)
|
||||
missing module named openvino.properties.hint.inference_precision - imported by openvino.properties.hint (top-level), nncf.quantization.algorithms.accuracy_control.openvino_backend (top-level), nncf.openvino.engine (top-level)
|
||||
missing module named 'openvino._pyopenvino.properties' - imported by openvino.runtime.properties (top-level), openvino.runtime.properties.hint (top-level), openvino.properties (top-level), openvino.properties.hint (top-level), openvino.properties.intel_cpu (top-level), openvino.properties.intel_gpu (top-level), openvino.properties.intel_auto (top-level), openvino.properties.device (top-level), openvino.properties.log (top-level), openvino.properties.streams (top-level), nncf.openvino.optimized_functions.models (top-level)
|
||||
missing module named 'openvino._pyopenvino._offline_transformations' - imported by openvino._offline_transformations (top-level)
|
||||
missing module named 'transformers.utils' - imported by nncf.data.generators (delayed, optional)
|
||||
missing module named icu - imported by natsort.compat.locale (optional), natsort.natsort (conditional, optional)
|
||||
missing module named fastnumbers - imported by natsort.compat.fastnumbers (conditional, optional)
|
||||
missing module named 'openvino._pyopenvino.preprocess' - imported by openvino.preprocess (top-level)
|
||||
missing module named ui - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
|
||||
missing module named splash - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
|
||||
BIN
qt_app_pyside1/build/TrafficMonitor/xref-TrafficMonitor.html
LFS
Normal file
BIN
qt_app_pyside1/build/TrafficMonitor/xref-TrafficMonitor.html
LFS
Normal file
Binary file not shown.
93
qt_app_pyside1/build_analysis_report.md
Normal file
93
qt_app_pyside1/build_analysis_report.md
Normal file
@@ -0,0 +1,93 @@
# 🔍 PyInstaller Build Analysis Report
*Generated: July 5, 2025*

## 🚨 Critical Issues Identified

### 1. **Hidden Import Failures**
- **ERROR**: `ui.main_window` not found
- **ERROR**: `controllers` not found
- **CAUSE**: PyInstaller cannot find these modules as packages
- **IMPACT**: Runtime import failures for UI and controller modules

### 2. **Module Structure Issues**
- **PROBLEM**: Treating folders as modules without proper `__init__.py` files
- **AFFECTED**: `ui/`, `controllers/`, `utils/` directories
- **CONSEQUENCE**: Import resolution failures

### 3. **Massive Dependencies**
- **SIZE**: Build includes TensorFlow (2.19.0), PyTorch (2.5.1), scikit-learn, etc.
- **IMPACT**: ~800MB+ executable with unnecessary ML libraries
- **BLOAT**: Most dependencies are unused by the traffic monitoring app

### 4. **Deprecation Warnings**
- **TorchScript**: Multiple deprecation warnings
- **torch.distributed**: Legacy API warnings
- **NNCF**: Version mismatch warnings (torch 2.5.1 vs recommended 2.6.*)

## ✅ Successful Components
- ✓ PySide6 Qt framework detected and integrated
- ✓ OpenCV (cv2) hooks processed successfully
- ✓ NumPy and core scientific libraries included
- ✓ Build completed without fatal errors

## 🛠️ Recommended Fixes

### **Immediate Fixes**
1. **Add `__init__.py` files** to make directories proper Python packages (see the sketch after this list)
2. **Fix hidden imports** with correct module paths
3. **Exclude unused dependencies** to reduce size
4. **Add specific imports** for UI components
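A minimal sketch of fix 1, assuming the `ui/`, `controllers/`, and `utils/` folders sit next to `main.py` (directory names taken from this report; the real project layout may differ):

```python
from pathlib import Path

# Create empty package markers so PyInstaller can resolve these folders as packages
for package_dir in ("ui", "controllers", "utils"):
    init_file = Path(package_dir) / "__init__.py"
    if init_file.parent.is_dir() and not init_file.exists():
        init_file.write_text("# Package marker for PyInstaller import resolution\n")
        print(f"Created {init_file}")
```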

### **Optimized Build Command**
```bash
pyinstaller --onefile --console --name=FixedDebug ^
--add-data="ui;ui" ^
--add-data="controllers;controllers" ^
--add-data="utils;utils" ^
--add-data="config.json;." ^
--hidden-import=ui.main_window ^
--hidden-import=controllers.video_controller_new ^
--hidden-import=utils.crosswalk_utils_advanced ^
--hidden-import=utils.traffic_light_utils ^
--hidden-import=cv2 ^
--hidden-import=openvino ^
--hidden-import=numpy ^
--hidden-import=PySide6.QtCore ^
--hidden-import=PySide6.QtWidgets ^
--hidden-import=PySide6.QtGui ^
--exclude-module=tensorflow ^
--exclude-module=torch ^
--exclude-module=sklearn ^
--exclude-module=matplotlib ^
--exclude-module=pandas ^
main.py
```
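Before rerunning the build, a quick smoke test (a sketch; the module names are simply the ones passed via `--hidden-import` above) can confirm that those imports actually resolve from the project root:

```python
import importlib

# Each module passed via --hidden-import must be importable from the project root,
# otherwise the frozen executable will fail at runtime even though the build succeeds.
for module_name in (
    "ui.main_window",
    "controllers.video_controller_new",
    "utils.crosswalk_utils_advanced",
    "utils.traffic_light_utils",
):
    try:
        importlib.import_module(module_name)
        print(f"OK       {module_name}")
    except ImportError as exc:
        print(f"MISSING  {module_name}: {exc}")
```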

### **Size Optimization**
- **Current**: ~800MB+ with ML libraries
- **Optimized**: ~200-300MB without unused dependencies
- **Core only**: PySide6 + OpenVINO + OpenCV + app code

## 🎯 Runtime Risk Assessment

### **High Risk**
- UI module import failures
- Controller module missing
- Configuration file access issues

### **Medium Risk**
- Missing utility modules
- OpenVINO model loading
- Resource file access

### **Low Risk**
- Core PySide6 functionality
- OpenCV operations
- Basic Python libraries

## 📋 Next Steps
1. Create missing `__init__.py` files
2. Test optimized build command
3. Run executable and capture any runtime errors (see the sketch below)
4. Verify all UI components load correctly
5. Test complete pipeline functionality
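For step 3, a minimal sketch of capturing runtime errors from the console build (assuming the `--console` command above produced `dist/FixedDebug.exe`; adjust the path for the windowed build):

```python
import subprocess
from pathlib import Path

# Run the debug executable and keep everything it prints before exiting or crashing
exe = Path("dist") / "FixedDebug.exe"
result = subprocess.run([str(exe)], capture_output=True, text=True)
Path("runtime_errors.log").write_text(result.stdout + result.stderr, encoding="utf-8")
print(f"Exit code: {result.returncode} (output saved to runtime_errors.log)")
```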
189
qt_app_pyside1/build_exe.py
Normal file
189
qt_app_pyside1/build_exe.py
Normal file
@@ -0,0 +1,189 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Comprehensive build script for Traffic Monitor application
|
||||
This script handles the complete build process with all necessary PyInstaller flags
|
||||
"""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
def run_command(command, description):
|
||||
"""Run a command and handle errors"""
|
||||
print(f"\n🔧 {description}")
|
||||
print(f"Running: {command}")
|
||||
|
||||
try:
|
||||
result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
|
||||
print("✅ Success!")
|
||||
if result.stdout:
|
||||
print(result.stdout)
|
||||
return True
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"❌ Error: {e}")
|
||||
if e.stdout:
|
||||
print("STDOUT:", e.stdout)
|
||||
if e.stderr:
|
||||
print("STDERR:", e.stderr)
|
||||
return False
|
||||
|
||||
def build_application():
|
||||
"""Build the application with PyInstaller"""
|
||||
|
||||
# Get current directory
|
||||
current_dir = Path.cwd()
|
||||
print(f"Building from: {current_dir}")
|
||||
|
||||
# Clean previous builds
|
||||
print("\n🧹 Cleaning previous builds...")
|
||||
for folder in ['build', 'dist']:
|
||||
if os.path.exists(folder):
|
||||
shutil.rmtree(folder)
|
||||
print(f"Removed {folder}")
|
||||
|
||||
if os.path.exists('TrafficMonitor.spec'):
|
||||
os.remove('TrafficMonitor.spec')
|
||||
print("Removed old spec file")
|
||||
|
||||
# Define PyInstaller command with all necessary flags
|
||||
pyinstaller_cmd = [
|
||||
'pyinstaller',
|
||||
'--name=TrafficMonitor',
|
||||
'--windowed', # Remove for debugging
|
||||
'--onefile',
|
||||
'--icon=resources/icon.ico' if os.path.exists('resources/icon.ico') else '',
|
||||
|
||||
# Add data files and folders
|
||||
'--add-data=ui;ui',
|
||||
'--add-data=controllers;controllers',
|
||||
'--add-data=utils;utils',
|
||||
'--add-data=openvino_models;openvino_models',
|
||||
'--add-data=resources;resources' if os.path.exists('resources') else '',
|
||||
'--add-data=config.json;.',
|
||||
'--add-data=splash.py;.',
|
||||
|
||||
# Hidden imports for modules PyInstaller might miss
|
||||
'--hidden-import=cv2',
|
||||
'--hidden-import=openvino',
|
||||
'--hidden-import=numpy',
|
||||
'--hidden-import=PySide6.QtCore',
|
||||
'--hidden-import=PySide6.QtWidgets',
|
||||
'--hidden-import=PySide6.QtGui',
|
||||
'--hidden-import=json',
|
||||
'--hidden-import=os',
|
||||
'--hidden-import=sys',
|
||||
'--hidden-import=time',
|
||||
'--hidden-import=traceback',
|
||||
'--hidden-import=pathlib',
|
||||
|
||||
# Main script
|
||||
'main.py'
|
||||
]
|
||||
|
||||
    # Drop empty arguments (the icon flag is only added when resources/icon.ico exists)
    pyinstaller_cmd = [arg for arg in pyinstaller_cmd if arg]
|
||||
|
||||
# Convert to string command
|
||||
cmd_str = ' '.join(f'"{arg}"' if ' ' in arg else arg for arg in pyinstaller_cmd)
|
||||
|
||||
# Build the application
|
||||
if run_command(cmd_str, "Building Traffic Monitor application"):
|
||||
print(f"\n✅ Build completed successfully!")
|
||||
print(f"Executable location: {current_dir}/dist/TrafficMonitor.exe")
|
||||
return True
|
||||
else:
|
||||
print(f"\n❌ Build failed!")
|
||||
return False
|
||||
|
||||
def build_debug_version():
|
||||
"""Build a debug version with console output"""
|
||||
|
||||
print("\n🔧 Building debug version...")
|
||||
|
||||
# Define PyInstaller command for debug build
|
||||
pyinstaller_cmd = [
|
||||
'pyinstaller',
|
||||
'--name=TrafficMonitorDebug',
|
||||
'--console', # Enable console for debugging
|
||||
'--onefile',
|
||||
|
||||
# Add data files and folders
|
||||
'--add-data=ui;ui',
|
||||
'--add-data=controllers;controllers',
|
||||
'--add-data=utils;utils',
|
||||
'--add-data=openvino_models;openvino_models',
|
||||
'--add-data=resources;resources' if os.path.exists('resources') else '',
|
||||
'--add-data=config.json;.',
|
||||
'--add-data=splash.py;.',
|
||||
|
||||
# Hidden imports
|
||||
'--hidden-import=cv2',
|
||||
'--hidden-import=openvino',
|
||||
'--hidden-import=numpy',
|
||||
'--hidden-import=PySide6.QtCore',
|
||||
'--hidden-import=PySide6.QtWidgets',
|
||||
'--hidden-import=PySide6.QtGui',
|
||||
'--hidden-import=json',
|
||||
'--hidden-import=os',
|
||||
'--hidden-import=sys',
|
||||
'--hidden-import=time',
|
||||
'--hidden-import=traceback',
|
||||
'--hidden-import=pathlib',
|
||||
|
||||
# Main script
|
||||
'main.py'
|
||||
]
|
||||
|
||||
# Convert to string command
|
||||
cmd_str = ' '.join(f'"{arg}"' if ' ' in arg else arg for arg in pyinstaller_cmd)
|
||||
|
||||
return run_command(cmd_str, "Building debug version")
|
||||
|
||||
def main():
|
||||
"""Main build process"""
|
||||
print("🚀 Traffic Monitor Build Script")
|
||||
print("=" * 50)
|
||||
|
||||
# Check if PyInstaller is available
|
||||
try:
|
||||
subprocess.run(['pyinstaller', '--version'], check=True, capture_output=True)
|
||||
except (subprocess.CalledProcessError, FileNotFoundError):
|
||||
print("❌ PyInstaller not found. Installing...")
|
||||
if not run_command('pip install pyinstaller', "Installing PyInstaller"):
|
||||
print("Failed to install PyInstaller")
|
||||
return False
|
||||
|
||||
# Check for required files
|
||||
required_files = ['main.py', 'ui', 'controllers', 'utils', 'config.json']
|
||||
missing_files = [f for f in required_files if not os.path.exists(f)]
|
||||
|
||||
if missing_files:
|
||||
print(f"❌ Missing required files/folders: {missing_files}")
|
||||
return False
|
||||
|
||||
print("✅ All required files found")
|
||||
|
||||
# Build debug version first
|
||||
if build_debug_version():
|
||||
print("\n✅ Debug build completed!")
|
||||
print(f"Debug executable: {Path.cwd()}/dist/TrafficMonitorDebug.exe")
|
||||
|
||||
# Build main application
|
||||
if build_application():
|
||||
print(f"\n🎉 All builds completed successfully!")
|
||||
print(f"Main executable: {Path.cwd()}/dist/TrafficMonitor.exe")
|
||||
print(f"Debug executable: {Path.cwd()}/dist/TrafficMonitorDebug.exe")
|
||||
|
||||
print(f"\n📝 To test:")
|
||||
print(f"1. Run debug version first: dist\\TrafficMonitorDebug.exe")
|
||||
print(f"2. If working, run main version: dist\\TrafficMonitor.exe")
|
||||
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
if __name__ == "__main__":
|
||||
success = main()
|
||||
sys.exit(0 if success else 1)
|
||||
203
qt_app_pyside1/build_exe_optimized.py
Normal file
203
qt_app_pyside1/build_exe_optimized.py
Normal file
@@ -0,0 +1,203 @@
|
||||
"""
|
||||
OPTIMIZED PYINSTALLER BUILD SCRIPT v2.0
|
||||
========================================
|
||||
This script addresses all critical errors and warnings from the build log:
|
||||
|
||||
Critical Fixes:
|
||||
1. Missing __init__.py files (fixed by creating them)
|
||||
2. Missing hidden imports (cv2, json modules)
|
||||
3. Correct data file inclusion
|
||||
4. Platform-specific optimizations
|
||||
|
||||
Usage: python build_exe_optimized.py
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
def clean_build_artifacts():
|
||||
"""Clean previous build artifacts"""
|
||||
print("🧹 Cleaning previous build artifacts...")
|
||||
|
||||
    # Directories can be removed directly; '*.spec' is a glob pattern, so it must be
    # expanded explicitly (os.path.exists('*.spec') never matches a wildcard).
    for artifact in ['build', 'dist']:
        if os.path.isdir(artifact):
            shutil.rmtree(artifact)
            print(f"   Removed directory: {artifact}")

    for spec_file in Path('.').glob('*.spec'):
        spec_file.unlink()
        print(f"   Removed file: {spec_file}")
|
||||
|
||||
def verify_dependencies():
|
||||
"""Verify all required packages are installed"""
|
||||
print("📦 Verifying dependencies...")
|
||||
|
||||
    # Map pip package names to the module names they are imported as
    # (the names differ for opencv-python -> cv2 and pillow -> PIL, so simply
    # lowercasing the pip name would mis-report those packages as missing)
    required_packages = {
        'PySide6': 'PySide6',
        'opencv-python': 'cv2',
        'numpy': 'numpy',
        'openvino': 'openvino',
        'ultralytics': 'ultralytics',
        'matplotlib': 'matplotlib',
        'pillow': 'PIL',
        'requests': 'requests',
    }

    missing_packages = []
    for package, import_name in required_packages.items():
        try:
            __import__(import_name)
            print(f"   ✓ {package}")
        except ImportError:
            missing_packages.append(package)
            print(f"   ✗ {package} - MISSING")
|
||||
|
||||
if missing_packages:
|
||||
print(f"\n❌ Missing packages: {', '.join(missing_packages)}")
|
||||
print(" Install with: pip install " + " ".join(missing_packages))
|
||||
return False
|
||||
|
||||
print(" ✓ All dependencies verified")
|
||||
return True
|
||||
|
||||
def build_executable():
|
||||
"""Build the executable with optimized PyInstaller command"""
|
||||
print("🔨 Building executable...")
|
||||
|
||||
# Core PyInstaller command with ALL critical fixes
|
||||
cmd = [
|
||||
'pyinstaller',
|
||||
'--name=TrafficMonitoringApp',
|
||||
'--onefile', # Single executable
|
||||
'--windowed', # No console window
|
||||
'--icon=resources/app_icon.ico' if os.path.exists('resources/app_icon.ico') else '',
|
||||
|
||||
# === CRITICAL HIDDEN IMPORTS (Fixes Build Errors) ===
|
||||
'--hidden-import=cv2',
|
||||
'--hidden-import=cv2.cv2',
|
||||
'--hidden-import=numpy',
|
||||
'--hidden-import=numpy.core',
|
||||
'--hidden-import=openvino',
|
||||
'--hidden-import=openvino.runtime',
|
||||
'--hidden-import=ultralytics',
|
||||
'--hidden-import=ultralytics.engine',
|
||||
'--hidden-import=PySide6.QtCore',
|
||||
'--hidden-import=PySide6.QtWidgets',
|
||||
'--hidden-import=PySide6.QtGui',
|
||||
'--hidden-import=json',
|
||||
'--hidden-import=pathlib',
|
||||
'--hidden-import=threading',
|
||||
'--hidden-import=queue',
|
||||
|
||||
# === UI/CONTROLLER MODULES ===
|
||||
'--hidden-import=ui',
|
||||
'--hidden-import=ui.main_window',
|
||||
'--hidden-import=ui.main_window1',
|
||||
'--hidden-import=controllers',
|
||||
'--hidden-import=controllers.video_controller',
|
||||
'--hidden-import=utils',
|
||||
'--hidden-import=utils.detection_utils',
|
||||
'--hidden-import=utils.tracking_utils',
|
||||
'--hidden-import=utils.crosswalk_utils_advanced',
|
||||
'--hidden-import=utils.traffic_light_utils',
|
||||
|
||||
# === EXCLUDE HEAVY/UNUSED MODULES (Reduces Size) ===
|
||||
'--exclude-module=matplotlib.backends._backend_pdf',
|
||||
'--exclude-module=matplotlib.backends._backend_ps',
|
||||
'--exclude-module=matplotlib.backends._backend_svg',
|
||||
'--exclude-module=tkinter',
|
||||
'--exclude-module=PyQt5',
|
||||
'--exclude-module=unittest',
|
||||
'--exclude-module=test',
|
||||
'--exclude-module=distutils',
|
||||
|
||||
# === DATA FILES AND DIRECTORIES ===
|
||||
'--add-data=config.json;.',
|
||||
'--add-data=resources;resources',
|
||||
'--add-data=openvino_models;openvino_models',
|
||||
'--add-data=ui;ui',
|
||||
'--add-data=controllers;controllers',
|
||||
'--add-data=utils;utils',
|
||||
|
||||
# === SPLASH SCREEN ===
|
||||
'--splash=resources/splash.png' if os.path.exists('resources/splash.png') else '',
|
||||
|
||||
# === MAIN SCRIPT ===
|
||||
'main.py'
|
||||
]
|
||||
|
||||
# Remove empty strings from command
|
||||
cmd = [arg for arg in cmd if arg]
|
||||
|
||||
print("📋 PyInstaller command:")
|
||||
print(" " + " ".join(cmd))
|
||||
print()
|
||||
|
||||
try:
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
print("✅ Build completed successfully!")
|
||||
print(f"📁 Executable location: dist/TrafficMonitoringApp.exe")
|
||||
return True
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
print("❌ Build failed!")
|
||||
print("STDOUT:", e.stdout)
|
||||
print("STDERR:", e.stderr)
|
||||
return False
|
||||
|
||||
def post_build_verification():
|
||||
"""Verify the built executable"""
|
||||
print("🔍 Post-build verification...")
|
||||
|
||||
exe_path = Path('dist/TrafficMonitoringApp.exe')
|
||||
if exe_path.exists():
|
||||
size_mb = exe_path.stat().st_size / (1024 * 1024)
|
||||
print(f" ✓ Executable created: {size_mb:.1f} MB")
|
||||
|
||||
# Check if critical files are bundled
|
||||
print(" 📋 Bundled resources check:")
|
||||
print(" - config.json: Expected in executable")
|
||||
print(" - openvino_models/: Expected in executable")
|
||||
print(" - resources/: Expected in executable")
|
||||
|
||||
return True
|
||||
else:
|
||||
print(" ❌ Executable not found!")
|
||||
return False
|
||||
|
||||
def main():
|
||||
"""Main build process"""
|
||||
print("🚀 TRAFFIC MONITORING APP - OPTIMIZED BUILD")
|
||||
print("=" * 50)
|
||||
|
||||
# Step 1: Clean artifacts
|
||||
clean_build_artifacts()
|
||||
print()
|
||||
|
||||
# Step 2: Verify dependencies
|
||||
if not verify_dependencies():
|
||||
print("\n❌ Build aborted due to missing dependencies")
|
||||
sys.exit(1)
|
||||
print()
|
||||
|
||||
# Step 3: Build executable
|
||||
if not build_executable():
|
||||
print("\n❌ Build failed")
|
||||
sys.exit(1)
|
||||
print()
|
||||
|
||||
# Step 4: Post-build verification
|
||||
if not post_build_verification():
|
||||
print("\n⚠️ Build completed but verification failed")
|
||||
sys.exit(1)
|
||||
|
||||
print("\n🎉 BUILD SUCCESSFUL!")
|
||||
print("=" * 50)
|
||||
print("📁 Executable: dist/TrafficMonitoringApp.exe")
|
||||
print("🏃 To run: dist\\TrafficMonitoringApp.exe")
|
||||
print("\n💡 Next steps:")
|
||||
print(" 1. Test the executable in a clean environment")
|
||||
print(" 2. Verify all UI elements load correctly")
|
||||
print(" 3. Test video processing and violation detection")
|
||||
print(" 4. Check configuration file loading")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
33
qt_app_pyside1/config.json
Normal file
33
qt_app_pyside1/config.json
Normal file
@@ -0,0 +1,33 @@
|
||||
{
    "detection": {
        "confidence_threshold": 0.5,
        "enable_ocr": true,
        "enable_tracking": true,
        "model_path": null,
        "device": "CPU"
    },
    "violations": {
        "red_light_grace_period": 2.0,
        "stop_sign_duration": 2.0,
        "speed_tolerance": 5,
        "enable_lane": true,
        "enable_red_light": true,
        "enable_speed": true,
        "enable_stop_sign": true
    },
    "display": {
        "max_display_width": 800,
        "show_confidence": true,
        "show_labels": true,
        "show_license_plates": true,
        "show_performance": true
    },
    "performance": {
        "max_history_frames": 1000,
        "cleanup_interval": 3600
    },
    "analytics": {
        "enable_charts": true,
        "history_length": 1000
    }
}
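A minimal sketch of reading this configuration (assuming `config.json` is in the working directory; the application's actual loading code is not shown in this diff):

```python
import json
from pathlib import Path

# Load the runtime configuration and pull out the detection settings shown above
config = json.loads(Path("config.json").read_text(encoding="utf-8"))
detection = config["detection"]
print(f"Device: {detection['device']}, "
      f"confidence threshold: {detection['confidence_threshold']}, "
      f"tracking enabled: {detection['enable_tracking']}")
```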
1
qt_app_pyside1/controllers/__init__.py
Normal file
1
qt_app_pyside1/controllers/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Controllers package for Traffic Monitoring System
|
||||
BIN
qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc
Normal file
BIN
qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
341
qt_app_pyside1/controllers/analytics_controller.py
Normal file
341
qt_app_pyside1/controllers/analytics_controller.py
Normal file
@@ -0,0 +1,341 @@
|
||||
from PySide6.QtCore import QObject, Signal, Slot
|
||||
import numpy as np
|
||||
from collections import defaultdict, deque
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, List, Any
|
||||
|
||||
class AnalyticsController(QObject):
|
||||
"""
|
||||
Controller for traffic analytics and statistics.
|
||||
|
||||
Manages:
|
||||
- Vehicle counts by class
|
||||
- Violation statistics
|
||||
- Temporal analytics (traffic over time)
|
||||
- Speed statistics
|
||||
"""
|
||||
analytics_updated = Signal(dict) # Emitted when analytics are updated
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the analytics controller"""
|
||||
super().__init__()
|
||||
|
||||
# Detection statistics
|
||||
self.detection_counts = defaultdict(int)
|
||||
self.detection_history = []
|
||||
|
||||
# Violation statistics
|
||||
self.violation_counts = defaultdict(int)
|
||||
self.violation_history = []
|
||||
|
||||
# Time series data (for charts)
|
||||
self.time_series = {
|
||||
'timestamps': [],
|
||||
'vehicle_counts': [],
|
||||
'pedestrian_counts': [],
|
||||
'violation_counts': []
|
||||
}
|
||||
|
||||
# Performance metrics
|
||||
self.fps_history = deque(maxlen=100)
|
||||
self.processing_times = deque(maxlen=100)
|
||||
|
||||
# Aggregated metrics
|
||||
self.aggregated_metrics = {
|
||||
'total_vehicles': 0,
|
||||
'total_pedestrians': 0,
|
||||
'total_violations': 0,
|
||||
'avg_processing_time': 0,
|
||||
'avg_fps': 0,
|
||||
'peak_vehicle_count': 0,
|
||||
'peak_violation_hour': None
|
||||
}
|
||||
|
||||
# Initialize current time window
|
||||
self.current_window = datetime.now().replace(
|
||||
minute=0, second=0, microsecond=0
|
||||
)
|
||||
self.window_stats = defaultdict(int)
|
||||
|
||||
# Add traffic light analytics
|
||||
self.traffic_light_counts = defaultdict(int) # Counts by color
|
||||
self.traffic_light_color_series = [] # List of (timestamp, color)
|
||||
self.traffic_light_color_numeric = [] # For charting: 0=unknown, 1=red, 2=yellow, 3=green
|
||||
self.traffic_light_color_map = {'unknown': 0, 'red': 1, 'yellow': 2, 'green': 3}
|
||||
|
||||
self._last_update = time.time()
|
||||
@Slot(object, list, float)
|
||||
def process_frame_data(self, frame, detections, metrics):
|
||||
"""
|
||||
Process frame data for analytics.
|
||||
|
||||
Args:
|
||||
frame: Video frame
|
||||
detections: List of detections
|
||||
metrics: Dictionary containing metrics like 'detection_fps' or directly the fps value
|
||||
"""
|
||||
try:
|
||||
# Empty violations list since violation detection is disabled
|
||||
violations = []
|
||||
|
||||
# Debug info
|
||||
det_count = len(detections) if detections else 0
|
||||
print(f"Analytics processing: {det_count} detections")
|
||||
except Exception as e:
|
||||
print(f"Error in process_frame_data initialization: {e}")
|
||||
violations = []
|
||||
# Update FPS history - safely handle different metrics formats
|
||||
try:
|
||||
if isinstance(metrics, dict):
|
||||
fps = metrics.get('detection_fps', None)
|
||||
if isinstance(fps, (int, float)):
|
||||
self.fps_history.append(fps)
|
||||
elif isinstance(metrics, (int, float)):
|
||||
# Handle case where metrics is directly the fps value
|
||||
self.fps_history.append(metrics)
|
||||
else:
|
||||
# Fallback if metrics is neither dict nor numeric
|
||||
print(f"Warning: Unexpected metrics type: {type(metrics)}")
|
||||
except Exception as e:
|
||||
print(f"Error processing metrics: {e}")
|
||||
# Add a default value to keep analytics running
|
||||
self.fps_history.append(0.0)
|
||||
|
||||
# Process detections
|
||||
vehicle_count = 0
|
||||
pedestrian_count = 0
|
||||
|
||||
# --- Traffic light analytics ---
|
||||
traffic_light_count = 0
|
||||
traffic_light_colors = []
|
||||
for det in detections:
|
||||
class_name = det.get('class_name', 'unknown').lower()
|
||||
self.detection_counts[class_name] += 1
|
||||
|
||||
# Track vehicles vs pedestrians
|
||||
if class_name in ['car', 'truck', 'bus', 'motorcycle']:
|
||||
vehicle_count += 1
|
||||
elif class_name == 'person':
|
||||
pedestrian_count += 1
|
||||
if class_name in ['traffic light', 'trafficlight', 'tl', 'signal']:
|
||||
traffic_light_count += 1
|
||||
color = det.get('traffic_light_color', {}).get('color', 'unknown')
|
||||
self.traffic_light_counts[color] += 1
|
||||
traffic_light_colors.append(color)
|
||||
# Track most common color for this frame
|
||||
if traffic_light_colors:
|
||||
from collections import Counter
|
||||
most_common_color = Counter(traffic_light_colors).most_common(1)[0][0]
|
||||
else:
|
||||
most_common_color = 'unknown'
|
||||
now_dt = datetime.now()
|
||||
self.traffic_light_color_series.append((now_dt.strftime('%H:%M:%S'), most_common_color))
|
||||
self.traffic_light_color_numeric.append(self.traffic_light_color_map.get(most_common_color, 0))
|
||||
# Keep last 60 points
|
||||
if len(self.traffic_light_color_series) > 60:
|
||||
self.traffic_light_color_series = self.traffic_light_color_series[-60:]
|
||||
self.traffic_light_color_numeric = self.traffic_light_color_numeric[-60:]
|
||||
|
||||
# Update time series data (once per second)
|
||||
now = time.time()
|
||||
if now - self._last_update >= 1.0:
|
||||
self._update_time_series(vehicle_count, pedestrian_count, len(violations), most_common_color)
|
||||
self._last_update = now
|
||||
|
||||
# Update aggregated metrics
|
||||
self._update_aggregated_metrics()
|
||||
|
||||
# Emit updated analytics
|
||||
self.analytics_updated.emit(self.get_analytics())
|
||||
|
||||
def _update_time_series(self, vehicle_count, pedestrian_count, violation_count, traffic_light_color=None):
|
||||
"""Update time series data for charts"""
|
||||
now = datetime.now()
|
||||
|
||||
# Check if we've moved to a new hour
|
||||
if now.hour != self.current_window.hour or now.day != self.current_window.day:
|
||||
# Save current window stats
|
||||
self._save_window_stats()
|
||||
|
||||
# Reset for new window
|
||||
self.current_window = now.replace(minute=0, second=0, microsecond=0)
|
||||
self.window_stats = defaultdict(int)
|
||||
# Add current counts to window
|
||||
self.window_stats['vehicles'] += vehicle_count
|
||||
self.window_stats['pedestrians'] += pedestrian_count
|
||||
self.window_stats['violations'] += violation_count
|
||||
|
||||
# Add to time series
|
||||
self.time_series['timestamps'].append(now.strftime('%H:%M:%S'))
|
||||
self.time_series['vehicle_counts'].append(vehicle_count)
|
||||
self.time_series['pedestrian_counts'].append(pedestrian_count)
|
||||
self.time_series['violation_counts'].append(violation_count)
|
||||
|
||||
# Add traffic light color to time series
|
||||
if traffic_light_color is not None:
|
||||
if 'traffic_light_colors' not in self.time_series:
|
||||
self.time_series['traffic_light_colors'] = []
|
||||
self.time_series['traffic_light_colors'].append(traffic_light_color)
|
||||
if len(self.time_series['traffic_light_colors']) > 60:
|
||||
self.time_series['traffic_light_colors'] = self.time_series['traffic_light_colors'][-60:]
|
||||
|
||||
# Keep last 60 data points (1 minute at 1 Hz)
|
||||
if len(self.time_series['timestamps']) > 60:
|
||||
for key in self.time_series:
|
||||
self.time_series[key] = self.time_series[key][-60:]
|
||||
|
||||
def _save_window_stats(self):
|
||||
"""Save stats for the current time window"""
|
||||
if sum(self.window_stats.values()) > 0:
|
||||
window_info = {
|
||||
'time': self.current_window,
|
||||
'vehicles': self.window_stats['vehicles'],
|
||||
'pedestrians': self.window_stats['pedestrians'],
|
||||
'violations': self.window_stats['violations']
|
||||
}
|
||||
|
||||
# Update peak stats
|
||||
if window_info['vehicles'] > self.aggregated_metrics['peak_vehicle_count']:
|
||||
self.aggregated_metrics['peak_vehicle_count'] = window_info['vehicles']
|
||||
|
||||
if window_info['violations'] > 0:
|
||||
if self.aggregated_metrics['peak_violation_hour'] is None or \
|
||||
window_info['violations'] > self.aggregated_metrics['peak_violation_hour']['violations']:
|
||||
self.aggregated_metrics['peak_violation_hour'] = {
|
||||
'time': self.current_window.strftime('%H:%M'),
|
||||
'violations': window_info['violations']
|
||||
}
|
||||
|
||||
def _update_aggregated_metrics(self):
|
||||
"""Update aggregated analytics metrics"""
|
||||
# Count totals
|
||||
self.aggregated_metrics['total_vehicles'] = sum([
|
||||
self.detection_counts[c] for c in
|
||||
['car', 'truck', 'bus', 'motorcycle']
|
||||
])
|
||||
self.aggregated_metrics['total_pedestrians'] = self.detection_counts['person']
|
||||
self.aggregated_metrics['total_violations'] = sum(self.violation_counts.values())
|
||||
|
||||
# Average FPS
|
||||
if self.fps_history:
|
||||
# Only sum numbers, skip dicts
|
||||
numeric_fps = [f for f in self.fps_history if isinstance(f, (int, float))]
|
||||
if numeric_fps:
|
||||
self.aggregated_metrics['avg_fps'] = sum(numeric_fps) / len(numeric_fps)
|
||||
else:
|
||||
self.aggregated_metrics['avg_fps'] = 0.0
|
||||
|
||||
# Average processing time
|
||||
if self.processing_times:
|
||||
self.aggregated_metrics['avg_processing_time'] = sum(self.processing_times) / len(self.processing_times)
|
||||
|
||||
def get_analytics(self) -> Dict:
|
||||
"""
|
||||
Get current analytics data.
|
||||
|
||||
Returns:
|
||||
Dictionary of analytics data
|
||||
"""
|
||||
return {
|
||||
'detection_counts': dict(self.detection_counts),
|
||||
'violation_counts': dict(self.violation_counts),
|
||||
'time_series': self.time_series,
|
||||
'metrics': self.aggregated_metrics,
|
||||
'recent_violations': self.violation_history[-10:] if self.violation_history else [],
|
||||
'traffic_light_counts': dict(self.traffic_light_counts),
|
||||
'traffic_light_color_series': self.traffic_light_color_series,
|
||||
'traffic_light_color_numeric': self.traffic_light_color_numeric
|
||||
}
|
||||
|
||||
def get_violation_history(self) -> List:
|
||||
"""
|
||||
Get violation history.
|
||||
|
||||
Returns:
|
||||
List of violation events
|
||||
"""
|
||||
return self.violation_history.copy()
|
||||
|
||||
def clear_statistics(self):
|
||||
"""Reset all statistics"""
|
||||
self.detection_counts = defaultdict(int)
|
||||
self.violation_counts = defaultdict(int)
|
||||
self.detection_history = []
|
||||
self.violation_history = []
|
||||
self.time_series = {
|
||||
'timestamps': [],
|
||||
'vehicle_counts': [],
|
||||
'pedestrian_counts': [],
|
||||
'violation_counts': []
|
||||
}
|
||||
self.fps_history.clear()
|
||||
self.processing_times.clear()
|
||||
self.window_stats = defaultdict(int)
|
||||
self.aggregated_metrics = {
|
||||
'total_vehicles': 0,
|
||||
'total_pedestrians': 0,
|
||||
'total_violations': 0,
|
||||
'avg_processing_time': 0,
|
||||
'avg_fps': 0,
|
||||
'peak_vehicle_count': 0,
|
||||
'peak_violation_hour': None
|
||||
}
|
||||
|
||||
def register_violation(self, violation):
|
||||
"""
|
||||
Register a new violation in the analytics.
|
||||
|
||||
Args:
|
||||
violation: Dictionary with violation information
|
||||
"""
|
||||
try:
|
||||
# Add to violation counts - check both 'violation' and 'violation_type' keys
|
||||
violation_type = violation.get('violation_type') or violation.get('violation', 'unknown')
|
||||
self.violation_counts[violation_type] += 1
|
||||
|
||||
# Add to violation history
|
||||
self.violation_history.append(violation)
|
||||
|
||||
# Update time series
|
||||
now = datetime.now()
|
||||
            self.time_series['timestamps'].append(now.strftime('%H:%M:%S'))  # keep the HH:MM:SS string format used in _update_time_series
|
||||
|
||||
# If we've been running for a while, we might need to drop old timestamps
|
||||
if len(self.time_series['timestamps']) > 100: # Keep last 100 points
|
||||
self.time_series['timestamps'] = self.time_series['timestamps'][-100:]
|
||||
self.time_series['vehicle_counts'] = self.time_series['vehicle_counts'][-100:]
|
||||
self.time_series['pedestrian_counts'] = self.time_series['pedestrian_counts'][-100:]
|
||||
self.time_series['violation_counts'] = self.time_series['violation_counts'][-100:]
|
||||
|
||||
# Append current totals to time series
|
||||
self.time_series['violation_counts'].append(sum(self.violation_counts.values()))
|
||||
|
||||
# Make sure all time series have the same length
|
||||
while len(self.time_series['vehicle_counts']) < len(self.time_series['timestamps']):
|
||||
self.time_series['vehicle_counts'].append(sum(self.detection_counts.get(c, 0)
|
||||
for c in ['car', 'truck', 'bus', 'motorcycle']))
|
||||
|
||||
while len(self.time_series['pedestrian_counts']) < len(self.time_series['timestamps']):
|
||||
self.time_series['pedestrian_counts'].append(self.detection_counts.get('person', 0))
|
||||
|
||||
# Update aggregated metrics
|
||||
self.aggregated_metrics['total_violations'] = sum(self.violation_counts.values())
|
||||
|
||||
# Emit updated analytics
|
||||
self._emit_analytics_update()
|
||||
|
||||
print(f"📊 Registered violation in analytics: {violation_type}")
|
||||
except Exception as e:
|
||||
print(f"❌ Error registering violation in analytics: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
def _emit_analytics_update(self):
|
||||
"""Emit analytics update signal with current data"""
|
||||
try:
|
||||
self.analytics_updated.emit(self.get_analytics())
|
||||
except Exception as e:
|
||||
print(f"❌ Error emitting analytics update: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
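A hedged usage sketch for the `AnalyticsController` above (the import path and the synthetic detection dicts are assumptions for illustration; in the real app the video controller feeds `process_frame_data`):

```python
from PySide6.QtCore import QCoreApplication
from controllers.analytics_controller import AnalyticsController  # assumed import path

app = QCoreApplication([])
controller = AnalyticsController()

def on_analytics(data):
    metrics = data["metrics"]
    print(f"Vehicles: {metrics['total_vehicles']}, avg FPS: {metrics['avg_fps']:.1f}")

controller.analytics_updated.connect(on_analytics)

# Feed one synthetic frame's worth of detections; the frame itself is not used here
detections = [
    {"class_name": "car", "confidence": 0.91},
    {"class_name": "person", "confidence": 0.84},
]
controller.process_frame_data(None, detections, {"detection_fps": 25.0})
```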
1085
qt_app_pyside1/controllers/bytetrack_demo.py
Normal file
1085
qt_app_pyside1/controllers/bytetrack_demo.py
Normal file
File diff suppressed because it is too large
Load Diff
550
qt_app_pyside1/controllers/bytetrack_tracker.py
Normal file
550
qt_app_pyside1/controllers/bytetrack_tracker.py
Normal file
@@ -0,0 +1,550 @@
|
||||
# ByteTrack implementation for vehicle tracking
|
||||
# Efficient and robust multi-object tracking that works exactly like DeepSORT
|
||||
import numpy as np
|
||||
import cv2
|
||||
import time
|
||||
from collections import defaultdict, deque
|
||||
from typing import List, Dict, Any, Tuple, Optional
|
||||
import torch
|
||||
|
||||
class Track:
|
||||
"""Track class for ByteTracker - Compatible with video_controller_new.py"""
|
||||
|
||||
def __init__(self, detection, track_id):
|
||||
"""Initialize a track from a detection
|
||||
|
||||
Args:
|
||||
detection: Detection array [x1, y1, x2, y2, score, class_id]
|
||||
track_id: Unique track ID
|
||||
"""
|
||||
self.track_id = track_id
|
||||
self.tlbr = detection[:4] # [x1, y1, x2, y2]
|
||||
self.score = detection[4] if len(detection) > 4 else 0.5
|
||||
self.class_id = int(detection[5]) if len(detection) > 5 else 0
|
||||
|
||||
self.time_since_update = 0
|
||||
self.hits = 1 # Number of times track was matched to a detection
|
||||
self.age = 1
|
||||
self.frame_id = 0 # Will be set by the tracker during update
|
||||
self.is_lost = False # Flag to indicate if track is lost
|
||||
self.state = 'Tentative' # Track state: Tentative, Confirmed, Deleted
|
||||
|
||||
# Store position history for movement tracking
|
||||
self.history = deque(maxlen=30)
|
||||
self.history.append(self.tlbr.copy())
|
||||
|
||||
# Simple velocity estimation
|
||||
self.velocity = np.array([0., 0.])
|
||||
|
||||
def predict(self):
|
||||
"""Predict the next state using simple motion model"""
|
||||
if len(self.history) >= 2:
|
||||
# Simple velocity estimation from last two positions
|
||||
curr_center = np.array([(self.tlbr[0] + self.tlbr[2])/2, (self.tlbr[1] + self.tlbr[3])/2])
|
||||
prev_tlbr = self.history[-2]
|
||||
prev_center = np.array([(prev_tlbr[0] + prev_tlbr[2])/2, (prev_tlbr[1] + prev_tlbr[3])/2])
|
||||
self.velocity = curr_center - prev_center
|
||||
|
||||
# Predict next position
|
||||
next_center = curr_center + self.velocity
|
||||
w, h = self.tlbr[2] - self.tlbr[0], self.tlbr[3] - self.tlbr[1]
|
||||
self.tlbr = np.array([next_center[0] - w/2, next_center[1] - h/2,
|
||||
next_center[0] + w/2, next_center[1] + h/2])
|
||||
|
||||
self.age += 1
|
||||
self.time_since_update += 1
|
||||
|
||||
def update(self, detection):
|
||||
"""Update track with new detection"""
|
||||
self.tlbr = detection[:4]
|
||||
self.score = detection[4] if len(detection) > 4 else self.score
|
||||
self.class_id = int(detection[5]) if len(detection) > 5 else self.class_id
|
||||
|
||||
self.hits += 1
|
||||
self.time_since_update = 0
|
||||
self.history.append(self.tlbr.copy())
|
||||
|
||||
# Update state to confirmed after enough hits
|
||||
if self.state == 'Tentative' and self.hits >= 3:
|
||||
self.state = 'Confirmed'
|
||||
|
||||
def mark_missed(self):
|
||||
"""Mark track as missed (no detection matched)"""
|
||||
self.time_since_update += 1
|
||||
if self.time_since_update > 1:
|
||||
self.is_lost = True
|
||||
|
||||
def is_confirmed(self):
|
||||
"""Check if track is confirmed (has enough hits)"""
|
||||
return self.state == 'Confirmed'
|
||||
|
||||
def to_dict(self):
|
||||
"""Convert track to dictionary format for video_controller_new.py"""
|
||||
return {
|
||||
'id': self.track_id,
|
||||
'bbox': [float(self.tlbr[0]), float(self.tlbr[1]), float(self.tlbr[2]), float(self.tlbr[3])],
|
||||
'confidence': float(self.score),
|
||||
'class_id': int(self.class_id)
|
||||
}
|
||||
|
||||
|
||||
class BYTETracker:
|
||||
"""
|
||||
ByteTrack tracker implementation
|
||||
Designed to work exactly like DeepSORT with video_controller_new.py
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
track_thresh=0.5,
|
||||
track_buffer=30,
|
||||
match_thresh=0.7,
|
||||
frame_rate=30,
|
||||
track_high_thresh=0.6,
|
||||
track_low_thresh=0.1
|
||||
):
|
||||
self.tracked_tracks = [] # Active tracks being tracked
|
||||
self.lost_tracks = [] # Lost tracks (temporarily out of view)
|
||||
self.removed_tracks = [] # Removed tracks (permanently lost)
|
||||
|
||||
self.frame_id = 0
|
||||
self.max_time_lost = int(frame_rate / 30.0 * track_buffer)
|
||||
|
||||
self.track_thresh = track_thresh # Threshold for high-confidence detections
|
||||
self.track_high_thresh = track_high_thresh # Higher threshold for first association
|
||||
self.track_low_thresh = track_low_thresh # Lower threshold for second association
|
||||
self.match_thresh = match_thresh # IOU match threshold
|
||||
|
||||
self.track_id_count = 0
|
||||
|
||||
print(f"[BYTETRACK] Initialized with: high_thresh={track_high_thresh}, " +
|
||||
f"low_thresh={track_low_thresh}, match_thresh={match_thresh}, max_time_lost={self.max_time_lost}")
|
||||
|
||||
def update(self, detections, frame=None):
|
||||
"""Update tracks with new detections
|
||||
|
||||
Args:
|
||||
detections: list of dicts with keys ['bbox', 'confidence', 'class_id', ...]
|
||||
frame: Optional BGR frame for debug visualization
|
||||
|
||||
Returns:
|
||||
list of dicts with keys ['id', 'bbox', 'confidence', 'class_id', ...]
|
||||
"""
|
||||
self.frame_id += 1
|
||||
|
||||
# Convert detections to internal format
|
||||
converted_detections = self._convert_detections(detections)
|
||||
|
||||
print(f"[BYTETRACK] Frame {self.frame_id}: Processing {len(converted_detections)} detections")
|
||||
print(f"[BYTETRACK] Current state: {len(self.tracked_tracks)} tracked, {len(self.lost_tracks)} lost")
|
||||
|
||||
# Handle empty detections case
|
||||
if len(converted_detections) == 0:
|
||||
print(f"[BYTETRACK] No valid detections in frame {self.frame_id}")
|
||||
# Move all tracked to lost and update
|
||||
for track in self.tracked_tracks:
|
||||
track.mark_missed()
|
||||
track.predict()
|
||||
if track.time_since_update <= self.max_time_lost:
|
||||
self.lost_tracks.append(track)
|
||||
else:
|
||||
self.removed_tracks.append(track)
|
||||
|
||||
# Update lost tracks
|
||||
updated_lost = []
|
||||
for track in self.lost_tracks:
|
||||
track.predict()
|
||||
if track.time_since_update <= self.max_time_lost:
|
||||
updated_lost.append(track)
|
||||
else:
|
||||
self.removed_tracks.append(track)
|
||||
|
||||
self.tracked_tracks = []
|
||||
self.lost_tracks = updated_lost
|
||||
return []
|
||||
|
||||
# Split detections into high and low confidence
|
||||
confidence_values = converted_detections[:, 4].astype(float)
|
||||
high_indices = confidence_values >= self.track_high_thresh
|
||||
low_indices = (confidence_values >= self.track_low_thresh) & (confidence_values < self.track_high_thresh)
|
||||
|
||||
high_detections = converted_detections[high_indices]
|
||||
low_detections = converted_detections[low_indices]
|
||||
|
||||
print(f"[BYTETRACK] Split into {len(high_detections)} high-conf and {len(low_detections)} low-conf detections")
|
||||
|
||||
# Predict all tracks
|
||||
for track in self.tracked_tracks + self.lost_tracks:
|
||||
track.predict()
|
||||
|
||||
# First association: high-confidence detections with tracked tracks
|
||||
matches1, unmatched_tracks1, unmatched_dets1 = self._associate(
|
||||
high_detections, self.tracked_tracks, self.match_thresh)
|
||||
|
||||
# Update matched tracks
|
||||
for match in matches1:
|
||||
track_idx, det_idx = match
|
||||
self.tracked_tracks[track_idx].update(high_detections[det_idx])
|
||||
self.tracked_tracks[track_idx].frame_id = self.frame_id
|
||||
|
||||
# Move unmatched tracks to lost
|
||||
unmatched_tracked_tracks = []
|
||||
for idx in unmatched_tracks1:
|
||||
track = self.tracked_tracks[idx]
|
||||
track.mark_missed()
|
||||
if track.time_since_update <= self.max_time_lost:
|
||||
self.lost_tracks.append(track)
|
||||
else:
|
||||
self.removed_tracks.append(track)
|
||||
|
||||
# Keep only matched tracks
|
||||
self.tracked_tracks = [self.tracked_tracks[i] for i in range(len(self.tracked_tracks)) if i not in unmatched_tracks1]
|
||||
|
||||
# Second association: remaining high-conf detections with lost tracks
|
||||
if len(unmatched_dets1) > 0 and len(self.lost_tracks) > 0:
|
||||
remaining_high_dets = high_detections[unmatched_dets1]
|
||||
matches2, unmatched_tracks2, unmatched_dets2 = self._associate(
|
||||
remaining_high_dets, self.lost_tracks, self.match_thresh)
|
||||
|
||||
# Reactivate matched lost tracks
|
||||
for match in matches2:
|
||||
track_idx, det_idx = match
|
||||
track = self.lost_tracks[track_idx]
|
||||
track.update(remaining_high_dets[det_idx])
|
||||
track.frame_id = self.frame_id
|
||||
track.is_lost = False
|
||||
self.tracked_tracks.append(track)
|
||||
|
||||
# Remove reactivated tracks from lost
|
||||
self.lost_tracks = [self.lost_tracks[i] for i in range(len(self.lost_tracks)) if i not in [m[0] for m in matches2]]
|
||||
|
||||
# Update unmatched detections indices
|
||||
final_unmatched_dets = [unmatched_dets1[i] for i in unmatched_dets2]
|
||||
else:
|
||||
final_unmatched_dets = unmatched_dets1
|
||||
|
||||
# Third association: low-confidence detections with remaining lost tracks
|
||||
if len(low_detections) > 0 and len(self.lost_tracks) > 0:
|
||||
matches3, unmatched_tracks3, unmatched_dets3 = self._associate(
|
||||
low_detections, self.lost_tracks, self.match_thresh)
|
||||
|
||||
# Reactivate matched lost tracks
|
||||
for match in matches3:
|
||||
track_idx, det_idx = match
|
||||
track = self.lost_tracks[track_idx]
|
||||
track.update(low_detections[det_idx])
|
||||
track.frame_id = self.frame_id
|
||||
track.is_lost = False
|
||||
self.tracked_tracks.append(track)
|
||||
|
||||
# Remove reactivated tracks from lost
|
||||
self.lost_tracks = [self.lost_tracks[i] for i in range(len(self.lost_tracks)) if i not in [m[0] for m in matches3]]
|
||||
|
||||
# Create new tracks for remaining unmatched high-confidence detections
|
||||
new_tracks_created = 0
|
||||
for det_idx in final_unmatched_dets:
|
||||
detection = high_detections[det_idx]
|
||||
if detection[4] >= self.track_thresh: # Only create tracks for high-confidence detections
|
||||
self.track_id_count += 1
|
||||
new_track = Track(detection, self.track_id_count)
|
||||
new_track.frame_id = self.frame_id
|
||||
self.tracked_tracks.append(new_track)
|
||||
new_tracks_created += 1
|
||||
|
||||
# Clean up lost tracks that have been lost too long
|
||||
updated_lost = []
|
||||
removed_count = 0
|
||||
for track in self.lost_tracks:
|
||||
if track.time_since_update <= self.max_time_lost:
|
||||
updated_lost.append(track)
|
||||
else:
|
||||
self.removed_tracks.append(track)
|
||||
removed_count += 1
|
||||
self.lost_tracks = updated_lost
|
||||
|
||||
print(f"[BYTETRACK] Matched {len(matches1)} tracks, created {new_tracks_created} new tracks, removed {removed_count} expired tracks")
|
||||
print(f"[BYTETRACK] Final state: {len(self.tracked_tracks)} tracked, {len(self.lost_tracks)} lost")
|
||||
|
||||
# Return confirmed tracks in dictionary format
|
||||
confirmed_tracks = []
|
||||
for track in self.tracked_tracks:
|
||||
if track.is_confirmed():
|
||||
confirmed_tracks.append(track.to_dict())
|
||||
|
||||
print(f"[BYTETRACK] Returning {len(confirmed_tracks)} confirmed tracks")
|
||||
return confirmed_tracks
|
||||
|
||||
def _convert_detections(self, detections):
|
||||
"""Convert detection format to numpy array"""
|
||||
if len(detections) == 0:
|
||||
return np.empty((0, 6))
|
||||
|
||||
converted = []
|
||||
for det in detections:
|
||||
bbox = det.get('bbox', [0, 0, 0, 0])
|
||||
conf = det.get('confidence', 0.0)
|
||||
class_id = det.get('class_id', 0)
|
||||
|
||||
# Ensure bbox is valid
|
||||
if len(bbox) == 4 and bbox[2] > bbox[0] and bbox[3] > bbox[1]:
|
||||
converted.append([float(bbox[0]), float(bbox[1]), float(bbox[2]), float(bbox[3]), float(conf), int(class_id)])
|
||||
|
||||
return np.array(converted) if converted else np.empty((0, 6))
|
||||
|
||||
def _associate(self, detections, tracks, iou_threshold):
|
||||
"""Associate detections with tracks using IoU"""
|
||||
if len(detections) == 0 or len(tracks) == 0:
|
||||
return [], list(range(len(tracks))), list(range(len(detections)))
|
||||
|
||||
# Calculate IoU matrix
|
||||
iou_matrix = self._calculate_iou_matrix(detections[:, :4], np.array([track.tlbr for track in tracks]))
|
||||
|
||||
# Use Hungarian algorithm (simplified greedy approach)
|
||||
matches, unmatched_tracks, unmatched_detections = self._linear_assignment(iou_matrix, iou_threshold)
|
||||
|
||||
return matches, unmatched_tracks, unmatched_detections
|
||||
|
||||
def _calculate_iou_matrix(self, detections, tracks):
|
||||
"""Calculate IoU matrix between detections and tracks"""
|
||||
if len(detections) == 0 or len(tracks) == 0:
|
||||
return np.empty((0, 0))
|
||||
|
||||
# Calculate areas
|
||||
det_areas = (detections[:, 2] - detections[:, 0]) * (detections[:, 3] - detections[:, 1])
|
||||
track_areas = (tracks[:, 2] - tracks[:, 0]) * (tracks[:, 3] - tracks[:, 1])
|
||||
|
||||
# Calculate intersections
|
||||
ious = np.zeros((len(detections), len(tracks)))
|
||||
for i, det in enumerate(detections):
|
||||
for j, track in enumerate(tracks):
|
||||
# Intersection coordinates
|
||||
x1 = max(det[0], track[0])
|
||||
y1 = max(det[1], track[1])
|
||||
x2 = min(det[2], track[2])
|
||||
y2 = min(det[3], track[3])
|
||||
|
||||
if x2 > x1 and y2 > y1:
|
||||
intersection = (x2 - x1) * (y2 - y1)
|
||||
union = det_areas[i] + track_areas[j] - intersection
|
||||
ious[i, j] = intersection / union if union > 0 else 0
|
||||
else:
|
||||
ious[i, j] = 0
|
||||
|
||||
return ious
|
||||
|
||||
def _linear_assignment(self, cost_matrix, threshold):
|
||||
"""Simple greedy assignment based on IoU threshold"""
|
||||
matches = []
|
||||
unmatched_tracks = list(range(cost_matrix.shape[1]))
|
||||
unmatched_detections = list(range(cost_matrix.shape[0]))
|
||||
|
||||
if cost_matrix.size == 0:
|
||||
return matches, unmatched_tracks, unmatched_detections
|
||||
|
||||
# Find matches above threshold
|
||||
for i in range(cost_matrix.shape[0]):
|
||||
for j in range(cost_matrix.shape[1]):
|
||||
if cost_matrix[i, j] >= threshold:
|
||||
if i in unmatched_detections and j in unmatched_tracks:
|
||||
matches.append([j, i]) # [track_idx, det_idx]
|
||||
unmatched_tracks.remove(j)
|
||||
unmatched_detections.remove(i)
|
||||
break
|
||||
|
||||
return matches, unmatched_tracks, unmatched_detections
|
||||
|
||||
|
||||
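# Note (illustrative addition, not part of the original file): the greedy matcher above is a
# simplified stand-in for the Hungarian algorithm. If SciPy were available, an optimal
# assignment over the same IoU matrix could be computed as sketched below; the threshold and
# the [track_idx, det_idx] return convention mirror _linear_assignment().
def _optimal_linear_assignment_sketch(iou_matrix, iou_threshold):
    """Optimal assignment via scipy.optimize.linear_sum_assignment (maximizing IoU)."""
    from scipy.optimize import linear_sum_assignment
    matches = []
    unmatched_tracks = list(range(iou_matrix.shape[1]))
    unmatched_detections = list(range(iou_matrix.shape[0]))
    if iou_matrix.size == 0:
        return matches, unmatched_tracks, unmatched_detections
    det_idx, trk_idx = linear_sum_assignment(-iou_matrix)  # negate to maximize IoU
    for i, j in zip(det_idx, trk_idx):
        if iou_matrix[i, j] >= iou_threshold:
            matches.append([j, i])  # [track_idx, det_idx], same convention as above
            unmatched_tracks.remove(j)
            unmatched_detections.remove(i)
    return matches, unmatched_tracks, unmatched_detections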
class ByteTrackVehicleTracker:
|
||||
"""
|
||||
ByteTrack-based vehicle tracker with exact same API as DeepSortVehicleTracker
|
||||
for drop-in replacement in video_controller_new.py
|
||||
"""
|
||||
_instance = None
|
||||
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls._instance is None:
|
||||
print("[BYTETRACK SINGLETON] Creating ByteTrackVehicleTracker instance")
|
||||
cls._instance = super(ByteTrackVehicleTracker, cls).__new__(cls)
|
||||
cls._instance._initialized = False
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
if getattr(self, '_initialized', False):
|
||||
return
|
||||
print("[BYTETRACK INIT] Initializing ByteTrack tracker")
|
||||
|
||||
# Parameters optimized for vehicle tracking in traffic scenes
|
||||
self.tracker = BYTETracker(
|
||||
track_thresh=0.4, # Minimum confidence to create new tracks
|
||||
track_buffer=30, # How many frames to keep lost tracks
|
||||
match_thresh=0.7, # IoU threshold for matching
|
||||
track_high_thresh=0.5, # High confidence threshold for first association
|
||||
track_low_thresh=0.2, # Low confidence threshold for second association
|
||||
frame_rate=30 # Expected frame rate
|
||||
)
|
||||
|
||||
self._initialized = True
|
||||
self.debug = True # Enable debug output
|
||||
|
||||
# Memory management
|
||||
self.max_removed_tracks = 100 # Limit removed tracks to prevent memory issues
|
||||
|
||||
def update(self, detections, frame=None):
|
||||
"""
|
||||
Update tracker with new detections - EXACT API as DeepSORT
|
||||
|
||||
Args:
|
||||
detections: list of dicts with keys ['bbox', 'confidence', 'class_id', ...]
|
||||
frame: BGR image (optional)
|
||||
|
||||
Returns:
|
||||
list of dicts with keys ['id', 'bbox', 'confidence', 'class_id', ...]
|
||||
"""
|
||||
try:
|
||||
# Input validation
|
||||
if not isinstance(detections, list):
|
||||
print(f"[BYTETRACK ERROR] Invalid detections format: {type(detections)}")
|
||||
return []
|
||||
|
||||
# Process detections
|
||||
valid_dets = []
|
||||
for i, det in enumerate(detections):
|
||||
if not isinstance(det, dict):
|
||||
continue
|
||||
|
||||
bbox = det.get('bbox')
|
||||
conf = det.get('confidence', 0.0)
|
||||
class_id = det.get('class_id', 0)
|
||||
|
||||
if bbox is not None and len(bbox) == 4:
|
||||
x1, y1, x2, y2 = map(float, bbox)
|
||||
conf = float(conf)
|
||||
class_id = int(class_id)
|
||||
|
||||
# Validate bbox dimensions
|
||||
if x2 > x1 and y2 > y1 and conf > 0.1:
|
||||
valid_dets.append({
|
||||
'bbox': [x1, y1, x2, y2],
|
||||
'confidence': conf,
|
||||
'class_id': class_id
|
||||
})
|
||||
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] Processing {len(valid_dets)} valid detections")
|
||||
|
||||
# Update tracker
|
||||
tracks = self.tracker.update(valid_dets, frame)
|
||||
|
||||
# Memory management - limit removed tracks
|
||||
if len(self.tracker.removed_tracks) > self.max_removed_tracks:
|
||||
self.tracker.removed_tracks = self.tracker.removed_tracks[-self.max_removed_tracks//2:]
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] Cleaned up removed tracks, keeping last {len(self.tracker.removed_tracks)}")
|
||||
|
||||
return tracks
|
||||
|
||||
except Exception as e:
|
||||
print(f"[BYTETRACK ERROR] Error updating tracker: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return []
|
||||
|
||||
def update_tracks(self, detections, frame=None):
|
||||
"""
|
||||
Update method for compatibility with DeepSORT interface used by model_manager.py
|
||||
|
||||
Args:
|
||||
detections: list of detection arrays in format [bbox_xywh, conf, class_name]
|
||||
frame: BGR image (optional)
|
||||
|
||||
Returns:
|
||||
list of track objects with DeepSORT-compatible interface including is_confirmed() method
|
||||
"""
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] update_tracks called with {len(detections)} detections")
|
||||
|
||||
# Convert from DeepSORT format to ByteTrack format
|
||||
converted_dets = []
|
||||
|
||||
for det in detections:
|
||||
try:
|
||||
# Handle different detection formats
|
||||
if isinstance(det, (list, tuple)) and len(det) >= 2:
|
||||
# DeepSORT format: [bbox_xywh, conf, class_name]
|
||||
bbox_xywh, conf = det[:2]
|
||||
class_name = det[2] if len(det) > 2 else 'vehicle'
|
||||
|
||||
# Convert [x, y, w, h] to [x1, y1, x2, y2] with type validation
|
||||
if isinstance(bbox_xywh, (list, tuple, np.ndarray)) and len(bbox_xywh) == 4:
|
||||
x, y, w, h = map(float, bbox_xywh)
|
||||
conf = float(conf)
|
||||
|
||||
converted_dets.append({
|
||||
'bbox': [x, y, x + w, y + h],
|
||||
'confidence': conf,
|
||||
'class_id': 0 # Default vehicle class
|
||||
})
|
||||
else:
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] Skipping invalid detection format: {det}")
|
||||
except Exception as e:
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] Error converting detection: {e}")
|
||||
|
||||
# Call the regular update method to get dictionary tracks
|
||||
dict_tracks = self.update(converted_dets, frame)
|
||||
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] Converting {len(dict_tracks)} dict tracks to DeepSORT-compatible objects")
|
||||
|
||||
# Create DeepSORT compatible track objects from dictionaries
|
||||
ds_tracks = []
|
||||
for track_data in dict_tracks:
|
||||
ds_track = ByteTrackOutput(track_data)
|
||||
ds_tracks.append(ds_track)
|
||||
|
||||
return ds_tracks
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Reset the tracker to clean state - starts track IDs from 1
|
||||
Call this when starting a new video or session
|
||||
"""
|
||||
print("[BYTETRACK] Resetting tracker state")
|
||||
if hasattr(self, 'tracker') and self.tracker is not None:
|
||||
# Reset the internal BYTETracker
|
||||
self.tracker.tracked_tracks = []
|
||||
self.tracker.lost_tracks = []
|
||||
self.tracker.removed_tracks = []
|
||||
self.tracker.frame_id = 0
|
||||
self.tracker.track_id_count = 0 # Reset ID counter to start from 1
|
||||
|
||||
print("[BYTETRACK] Reset complete - track IDs will start from 1")
|
||||
else:
|
||||
print("[BYTETRACK] Warning: Tracker not initialized, nothing to reset")
|
||||
|
||||
|
||||
class ByteTrackOutput:
|
||||
"""
|
||||
Adapter class to make ByteTrack output compatible with DeepSORT interface
|
||||
"""
|
||||
|
||||
    def __init__(self, track_data):
        """Initialize from a ByteTrack track dictionary"""
        self.track_id = track_data.get('id', -1)
        self.det_index = track_data.get('det_index', -1)
        # bbox is [x1, y1, x2, y2], matching Track.to_dict() above
        self.bbox = track_data.get('bbox', [0, 0, 0, 0])
        self.confidence = track_data.get('confidence', 0.0)
        self._confirmed = track_data.get('is_confirmed', True)
        # Store the original track data
        self._track_data = track_data

    def is_confirmed(self):
        """Return True if the track is confirmed (a method, as the DeepSORT interface expects)"""
        return self._confirmed

    def to_ltrb(self):
        """Return bounding box in [x1, y1, x2, y2] format (DeepSORT-compatible)"""
        return self.bbox

    def to_tlwh(self):
        """Return bounding box in [x, y, w, h] format"""
        x1, y1, x2, y2 = self.bbox
        return [x1, y1, x2 - x1, y2 - y1]

    def __getattr__(self, name):
        """Fallback to the original track data"""
        track_data = self.__dict__.get('_track_data', {})
        if name in track_data:
            return track_data[name]
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
|
||||
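# Usage sketch (illustrative, not part of the original file): how the tracker is driven per
# frame. The detection dicts follow the format documented in update(); the numbers below are
# made-up example values.
if __name__ == "__main__":
    tracker = ByteTrackVehicleTracker()
    detections = [
        {'bbox': [100.0, 120.0, 220.0, 260.0], 'confidence': 0.82, 'class_id': 2},
        {'bbox': [400.0, 150.0, 520.0, 300.0], 'confidence': 0.35, 'class_id': 2},
    ]
    for _ in range(5):  # a track is only confirmed (and returned) after ~3 matched frames
        tracks = tracker.update(detections, frame=None)  # frame is optional
    for t in tracks:
        print(t['id'], t['bbox'], t['confidence'], t['class_id'])
    tracker.reset()  # start track IDs from 1 again for a new video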
103
qt_app_pyside1/controllers/deepsort_tracker.py
Normal file
@@ -0,0 +1,103 @@
|
||||
# DeepSORT integration for vehicle tracking
|
||||
# You need to install deep_sort_realtime: pip install deep_sort_realtime
|
||||
from deep_sort_realtime.deepsort_tracker import DeepSort
|
||||
|
||||
class DeepSortVehicleTracker:
|
||||
_instance = None
|
||||
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls._instance is None:
|
||||
print("[DEEPSORT SINGLETON] Creating DeepSortVehicleTracker instance")
|
||||
cls._instance = super(DeepSortVehicleTracker, cls).__new__(cls)
|
||||
cls._instance._initialized = False
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
if getattr(self, '_initialized', False):
|
||||
return
|
||||
print("[DEEPSORT INIT] Initializing DeepSort tracker (should only see this once)")
|
||||
# Use DeepSORT with better parameters to reduce duplicate IDs
|
||||
self.tracker = DeepSort(
|
||||
max_age=50, # Keep tracks longer to avoid re-creating IDs
|
||||
n_init=3, # Require 3 consecutive detections before confirming track
|
||||
nms_max_overlap=0.3, # Stricter NMS to avoid duplicate detections
|
||||
max_cosine_distance=0.4, # Stricter appearance matching
|
||||
nn_budget=100, # Budget for appearance features
|
||||
gating_only_position=False # Use both position and appearance for gating
|
||||
)
|
||||
self._initialized = True
|
||||
self.track_id_counter = {} # Track seen IDs to detect duplicates
|
||||
|
||||
def update(self, detections, frame=None):
|
||||
# detections: list of dicts with keys ['bbox', 'confidence', 'class_id', ...]
|
||||
# frame: BGR image (optional, for appearance embedding)
|
||||
# Returns: list of dicts with keys ['id', 'bbox', 'confidence', 'class_id', ...]
|
||||
|
||||
# Convert detections to DeepSORT format with validation
|
||||
ds_detections = []
|
||||
for i, det in enumerate(detections):
|
||||
bbox = det.get('bbox')
|
||||
conf = det.get('confidence', 0.0)
|
||||
class_id = det.get('class_id', -1)
|
||||
|
||||
if bbox is not None and len(bbox) == 4:
|
||||
x1, y1, x2, y2 = bbox
|
||||
# Validate bbox dimensions
|
||||
if x2 > x1 and y2 > y1 and conf > 0.3: # Higher confidence threshold
|
||||
# Convert to [x1, y1, width, height] format expected by DeepSORT
|
||||
bbox_xywh = [x1, y1, x2-x1, y2-y1]
|
||||
ds_detections.append([bbox_xywh, conf, class_id])
|
||||
print(f"[DEEPSORT] Added detection {i}: bbox={bbox_xywh}, conf={conf:.2f}")
|
||||
else:
|
||||
print(f"[DEEPSORT] Rejected detection {i}: invalid bbox or low confidence")
|
||||
else:
|
||||
print(f"[DEEPSORT] Rejected detection {i}: invalid bbox format")
|
||||
|
||||
print(f"[DEEPSORT] Processing {len(ds_detections)} valid detections")
|
||||
|
||||
# Update tracker with frame for appearance features
|
||||
if frame is not None:
|
||||
tracks = self.tracker.update_tracks(ds_detections, frame=frame)
|
||||
else:
|
||||
tracks = self.tracker.update_tracks(ds_detections)
|
||||
|
||||
# Process results and check for duplicate IDs
|
||||
results = []
|
||||
current_ids = []
|
||||
|
||||
for track in tracks:
|
||||
if not track.is_confirmed():
|
||||
continue
|
||||
|
||||
track_id = track.track_id
|
||||
ltrb = track.to_ltrb()
|
||||
conf = track.det_conf if hasattr(track, 'det_conf') else 0.0
|
||||
class_id = track.det_class if hasattr(track, 'det_class') else -1
|
||||
|
||||
# Check for duplicate IDs
|
||||
if track_id in current_ids:
|
||||
print(f"[DEEPSORT ERROR] DUPLICATE ID DETECTED: {track_id}")
|
||||
continue # Skip this duplicate
|
||||
|
||||
current_ids.append(track_id)
|
||||
|
||||
# Convert back to [x1, y1, x2, y2] format
|
||||
x1, y1, x2, y2 = ltrb
|
||||
bbox_xyxy = [x1, y1, x2, y2]
|
||||
|
||||
results.append({
|
||||
'id': track_id,
|
||||
'bbox': bbox_xyxy,
|
||||
'confidence': conf,
|
||||
'class_id': class_id
|
||||
})
|
||||
|
||||
conf_str = f"{conf:.2f}" if conf is not None else "None"
|
||||
print(f"[DEEPSORT] Track ID={track_id}: bbox={bbox_xyxy}, conf={conf_str}")
|
||||
|
||||
# Update ID counter for statistics
|
||||
for track_id in current_ids:
|
||||
self.track_id_counter[track_id] = self.track_id_counter.get(track_id, 0) + 1
|
||||
|
||||
print(f"[DEEPSORT] Returning {len(results)} confirmed tracks")
|
||||
return results
|
||||
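# Usage sketch (illustrative, not part of the original file): DeepSortVehicleTracker consumes
# and returns the same dict format as ByteTrackVehicleTracker, so the two are interchangeable
# from the caller's side. Requires deep_sort_realtime to be installed; the black frame below
# is only a placeholder for a real BGR image.
if __name__ == "__main__":
    import numpy as np
    bgr_frame = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder frame
    tracker = DeepSortVehicleTracker()  # singleton: repeated constructions reuse one instance
    dets = [{'bbox': [50, 60, 180, 220], 'confidence': 0.9, 'class_id': 2}]
    for _ in range(4):  # tracks are confirmed only after n_init=3 consecutive matches
        tracks = tracker.update(dets, frame=bgr_frame)
    print(tracks)  # list of dicts with 'id', 'bbox', 'confidence', 'class_id'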
173
qt_app_pyside1/controllers/difference.py
Normal file
@@ -0,0 +1,173 @@
|
||||
# Detailed Comparison: video_controller_new.py vs video_controller_finale.py
|
||||
#
|
||||
# This document provides a function-by-function, block-by-block comparison between `video_controller_new.py` and `video_controller_finale.py` as of July 2025. It highlights what is present, missing, or different in each file, and explains the practical impact of those differences for real-world red light violation detection and video analytics.
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Table of Contents
|
||||
# - [Overall Structure](#overall-structure)
|
||||
# - [Class/Function Inventory](#classfunction-inventory)
|
||||
# - [Function-by-Function Comparison](#function-by-function-comparison)
|
||||
# - [__init__](#__init__)
|
||||
# - [set_source](#set_source)
|
||||
# - [_get_source_properties](#_get_source_properties)
|
||||
# - [start/stop](#startstop)
|
||||
# - [_run](#_run)
|
||||
# - [_process_frame](#_process_frame)
|
||||
# - [detect_red_light_violations](#detect_red_light_violations)
|
||||
# - [Key Differences and Impact](#key-differences-and-impact)
|
||||
# - [Summary Table](#summary-table)
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Overall Structure
|
||||
#
|
||||
# - **video_controller_new.py**
|
||||
# - Modernized, modular, and debug-heavy.
|
||||
# - Uses enhanced annotation utilities, more robust fallback logic, and detailed debug output.
|
||||
# - Violation detection logic is inlined and self-contained.
|
||||
# - State machine for per-vehicle violation tracking is explicit and debugged.
|
||||
# - Crosswalk/violation line detection is always run, with fallback.
|
||||
# - Always emits overlays and signals, even if no violators.
|
||||
#
|
||||
# - **video_controller_finale.py**
|
||||
# - Reference implementation, known to work reliably in production.
|
||||
# - May use some different utility imports and slightly different state handling.
|
||||
# - Violation detection logic may be more tightly coupled to tracker or external detector class.
|
||||
# - Debug output is present but may be less granular.
|
||||
# - Fallbacks for violation line and traffic light are robust.
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Class/Function Inventory
|
||||
#
|
||||
# | Function/Class | In New | In Finale | Notes |
|
||||
# |-------------------------------|--------|-----------|-------|
|
||||
# | VideoController | ✔ | ✔ | Main class in both |
|
||||
# | __init__ | ✔ | ✔ | New: more debug, explicit tracker, fallback logic |
|
||||
# | set_source | ✔ | ✔ | Similar, new has more robust type handling |
|
||||
# | _get_source_properties | ✔ | ✔ | Similar, new has more debug |
|
||||
# | start/stop | ✔ | ✔ | Similar, new has more debug |
|
||||
# | _run | ✔ | ✔ | New: more debug, more robust detection/tracking |
|
||||
# | _process_frame | ✔ | ✔ | New: always runs crosswalk, overlays, fallback |
|
||||
# | detect_red_light_violations | ✔ | ✔ | New: inlined, explicit state machine, more debug |
|
||||
# | violation_detector (external) | ✖ | ✔ | Finale may use RedLightViolationDetector class |
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Function-by-Function Comparison
|
||||
#
|
||||
# ### __init__
|
||||
# - **New:**
|
||||
# - Sets up all state, tracker, and debug counters.
|
||||
# - Imports and initializes crosswalk detection utilities with try/except.
|
||||
# - Does NOT use external `RedLightViolationDetector` (commented out).
|
||||
# - Uses inlined `detect_red_light_violations` method.
|
||||
# - **Finale:**
|
||||
# - May use external `RedLightViolationDetector` class for violation logic.
|
||||
# - Similar state setup, but possibly less debug output.
|
||||
#
|
||||
# ### set_source
|
||||
# - **New:**
|
||||
# - Handles all source types robustly (file, camera, URL, device).
|
||||
# - More debug output for every branch.
|
||||
# - **Finale:**
|
||||
# - Similar logic, possibly less robust for edge cases.
|
||||
#
|
||||
# ### _get_source_properties
|
||||
# - **New:**
|
||||
# - More debug output, retries for camera sources.
|
||||
# - **Finale:**
|
||||
# - Similar, but may not retry as aggressively.
|
||||
#
|
||||
# ### start/stop
|
||||
# - **New:**
|
||||
# - More debug output, aggressive render timer (10ms).
|
||||
# - **Finale:**
|
||||
# - Standard start/stop, less debug.
|
||||
#
|
||||
# ### _run
|
||||
# - **New:**
|
||||
# - Handles detection, tracking, and annotation in one loop.
|
||||
# - Always normalizes class names.
|
||||
# - Always draws overlays and emits signals.
|
||||
# - More debug output for every step.
|
||||
# - **Finale:**
|
||||
# - Similar, but may use external violation detector.
|
||||
# - May not always emit overlays if no detections.
|
||||
#
|
||||
# ### _process_frame
|
||||
# - **New:**
|
||||
# - Always runs crosswalk/violation line detection.
|
||||
# - Always overlays violation line and traffic light status.
|
||||
# - Only runs violation detection if both red light and violation line are present.
|
||||
# - Always emits overlays/signals, even if no violators.
|
||||
# - More robust fallback for violation line.
|
||||
# - More debug output for every step.
|
||||
# - **Finale:**
|
||||
# - Similar, but may skip overlays if no detections.
|
||||
# - May use external violation detector.
|
||||
#
|
||||
# ### detect_red_light_violations
|
||||
# - **New:**
|
||||
# - Inlined, explicit state machine for per-vehicle tracking.
|
||||
# - Requires vehicle to be behind the line before crossing during red.
|
||||
# - Cooldown logic to prevent duplicate violations.
|
||||
# - Extensive debug output for every vehicle, every frame.
|
||||
# - **Finale:**
|
||||
# - May use external class for violation logic.
|
||||
# - Similar state machine, but less debug output.
|
||||
# - May have slightly different fallback/cooldown logic.
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Key Differences and Impact
|
||||
#
|
||||
# - **External Violation Detector:**
|
||||
# - Finale uses `RedLightViolationDetector` class; New inlines the logic.
|
||||
# - Impact: New is easier to debug and modify, but harder to swap out logic.
|
||||
#
|
||||
# - **Debug Output:**
|
||||
# - New has much more granular debug output for every step and every vehicle.
|
||||
# - Impact: Easier to diagnose issues in New.
|
||||
#
|
||||
# - **Fallback Logic:**
|
||||
# - Both have robust fallback for violation line and traffic light, but New is more explicit.
|
||||
#
|
||||
# - **Overlay/Signal Emission:**
|
||||
# - New always emits overlays and signals, even if no violators.
|
||||
# - Finale may skip if no detections.
|
||||
#
|
||||
# - **State Machine:**
|
||||
# - New's state machine is explicit, per-vehicle, and debugged.
|
||||
# - Finale's may be more implicit or handled in external class.
|
||||
#
|
||||
# - **Modularity:**
|
||||
# - Finale is more modular (external detector class), New is more monolithic but easier to trace.
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Summary Table
|
||||
#
|
||||
# | Feature/Function | video_controller_new.py | video_controller_finale.py |
|
||||
# |---------------------------------|:----------------------:|:-------------------------:|
|
||||
# | External Violation Detector | ✖ | ✔ |
|
||||
# | Inlined Violation Logic | ✔ | ✖ |
|
||||
# | Robust Fallbacks | ✔ | ✔ |
|
||||
# | Always Emits Overlays/Signals | ✔ | ✖/Partial |
|
||||
# | Extensive Debug Output | ✔ | ✖/Partial |
|
||||
# | Per-Vehicle State Machine | ✔ | ✔ |
|
||||
# | Modularity | ✖ | ✔ |
|
||||
# | Easy to Debug/Modify | ✔ | ✖/Partial |
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Conclusion
|
||||
#
|
||||
# - Use `video_controller_new.py` for maximum debug visibility, easier modification, and robust fallback logic.
|
||||
# - Use `video_controller_finale.py` for production-proven modularity and if you want to swap out violation logic easily.
|
||||
# - Both are robust, but the new version is more transparent and easier to debug in real-world scenarios.
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# *This file is auto-generated for developer reference. Update as code evolves.*
|
||||
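# Illustrative sketch (not part of the comparison above): the per-vehicle red light state
# machine that both controllers implement, reduced to its essentials. Function name,
# thresholds, and the "behind the line means smaller y" convention are assumptions for
# illustration only.
def check_red_light_violation(track_id, center_y, line_y, light_is_red, state, now, cooldown_s=5.0):
    """Return True when a tracked vehicle crosses the violation line during a red light.

    state: dict mapping track_id -> {'was_behind_line': bool, 'last_violation': float}
    Assumes image coordinates (y grows downward) and a horizontal violation line at line_y.
    """
    s = state.setdefault(track_id, {'was_behind_line': False, 'last_violation': 0.0})
    if center_y < line_y:
        # Vehicle is still behind the line; remember that so a later crossing counts.
        s['was_behind_line'] = True
        return False
    crossed = s['was_behind_line'] and center_y >= line_y
    if crossed and light_is_red and (now - s['last_violation']) > cooldown_s:
        s['last_violation'] = now      # cooldown prevents duplicate violations for the same vehicle
        s['was_behind_line'] = False
        return True
    return False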
394
qt_app_pyside1/controllers/embedder_import_patch.py
Normal file
@@ -0,0 +1,394 @@
|
||||
from deep_sort_realtime.embedder.embedder_pytorch import MobileNetV2_Embedder
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
|
||||
# Add parent directory to path for imports
|
||||
current_dir = Path(__file__).parent.parent.parent
|
||||
sys.path.append(str(current_dir))
|
||||
|
||||
# Import OpenVINO modules
|
||||
from detection_openvino import OpenVINOVehicleDetector
|
||||
from red_light_violation_pipeline import RedLightViolationPipeline
|
||||
|
||||
# Import from our utils package
|
||||
from utils.helpers import bbox_iou
|
||||
|
||||
class ModelManager:
|
||||
"""
|
||||
Manages OpenVINO models for traffic detection and violation monitoring.
|
||||
Only uses RedLightViolationPipeline for all violation/crosswalk/traffic light logic.
|
||||
"""
|
||||
def __init__(self, config_file: str = None):
|
||||
"""
|
||||
Initialize model manager with configuration.
|
||||
|
||||
Args:
|
||||
config_file: Path to JSON configuration file
|
||||
"""
|
||||
self.config = self._load_config(config_file)
|
||||
self.detector = None
|
||||
self.violation_pipeline = None # Use RedLightViolationPipeline only
|
||||
self.tracker = None
|
||||
self._initialize_models()
|
||||
|
||||
def _load_config(self, config_file: Optional[str]) -> Dict:
|
||||
"""
|
||||
Load configuration from file or use defaults.
|
||||
|
||||
Args:
|
||||
config_file: Path to JSON configuration file
|
||||
|
||||
Returns:
|
||||
Configuration dictionary
|
||||
"""
|
||||
import json
|
||||
default_config = {
|
||||
"detection": {
|
||||
"confidence_threshold": 0.5,
|
||||
"enable_ocr": True,
|
||||
"enable_tracking": True,
|
||||
"model_path": None
|
||||
},
|
||||
"violations": {
|
||||
"red_light_grace_period": 2.0,
|
||||
"stop_sign_duration": 2.0,
|
||||
"speed_tolerance": 5
|
||||
},
|
||||
"display": {
|
||||
"max_display_width": 800,
|
||||
"show_confidence": True,
|
||||
"show_labels": True,
|
||||
"show_license_plates": True
|
||||
},
|
||||
"performance": {
|
||||
"max_history_frames": 1000,
|
||||
"cleanup_interval": 3600
|
||||
}
|
||||
}
|
||||
|
||||
if config_file and os.path.exists(config_file):
|
||||
try:
|
||||
with open(config_file, 'r') as f:
|
||||
loaded_config = json.load(f)
|
||||
# Merge with defaults (preserving loaded values)
|
||||
for section in default_config:
|
||||
if section in loaded_config:
|
||||
default_config[section].update(loaded_config[section])
|
||||
except Exception as e:
|
||||
print(f"Error loading config: {e}")
|
||||
|
||||
return default_config
|
||||
|
||||
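    # Example config.json (illustrative; values shown mirror the defaults above). The optional
    # "device" key is not in the defaults but is read by _initialize_models / _find_best_model_path.
    # {
    #   "detection": {"confidence_threshold": 0.5, "enable_ocr": true, "enable_tracking": true,
    #                 "model_path": null, "device": "AUTO"},
    #   "violations": {"red_light_grace_period": 2.0, "stop_sign_duration": 2.0, "speed_tolerance": 5},
    #   "display": {"max_display_width": 800, "show_confidence": true, "show_labels": true,
    #               "show_license_plates": true},
    #   "performance": {"max_history_frames": 1000, "cleanup_interval": 3600}
    # }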
def _initialize_models(self):
|
||||
"""Initialize OpenVINO detection and violation models."""
|
||||
try:
|
||||
# Find best model path
|
||||
model_path = self.config["detection"].get("model_path")
|
||||
if not model_path or not os.path.exists(model_path):
|
||||
model_path = self._find_best_model_path()
|
||||
if not model_path:
|
||||
print("❌ No model found")
|
||||
return
|
||||
|
||||
# Initialize detector
|
||||
print(f"✅ Initializing OpenVINO detector with model: {model_path}")
|
||||
device = self.config["detection"].get("device", "AUTO")
|
||||
print(f"✅ Using inference device: {device}")
|
||||
self.detector = OpenVINOVehicleDetector(
|
||||
model_path=model_path,
|
||||
device=device,
|
||||
confidence_threshold=self.config["detection"]["confidence_threshold"]
|
||||
)
|
||||
|
||||
# Use only RedLightViolationPipeline for violation/crosswalk/traffic light logic
|
||||
self.violation_pipeline = RedLightViolationPipeline(debug=True)
|
||||
print("✅ Red light violation pipeline initialized (all other violation logic removed)")
|
||||
|
||||
# Initialize tracker if enabled
|
||||
if self.config["detection"]["enable_tracking"]:
|
||||
try:
|
||||
from deep_sort_realtime.deepsort_tracker import DeepSort
|
||||
|
||||
# Use optimized OpenVINO embedder if available
|
||||
use_optimized_embedder = True
|
||||
embedder = None
|
||||
|
||||
if use_optimized_embedder:
|
||||
try:
|
||||
# Try importing our custom OpenVINO embedder
|
||||
from utils.embedder_openvino import OpenVINOEmbedder
|
||||
print(f"✅ Initializing optimized OpenVINO embedder on {device}")
|
||||
|
||||
# Set model_path explicitly to use the user-supplied model
|
||||
script_dir = Path(__file__).parent.parent
|
||||
model_file_path = None
|
||||
|
||||
# Try the copy version first (might be modified for compatibility)
|
||||
copy_model_path = script_dir / "mobilenetv2 copy.xml"
|
||||
original_model_path = script_dir / "mobilenetv2.xml"
|
||||
|
||||
if copy_model_path.exists():
|
||||
model_file_path = str(copy_model_path)
|
||||
print(f"✅ Using user-supplied model: {model_file_path}")
|
||||
elif original_model_path.exists():
|
||||
model_file_path = str(original_model_path)
|
||||
print(f"✅ Using user-supplied model: {model_file_path}")
|
||||
|
||||
embedder = OpenVINOEmbedder(
|
||||
model_path=model_file_path,
|
||||
device=device,
|
||||
half=True # Use FP16 for better performance
|
||||
)
|
||||
except Exception as emb_err:
|
||||
print(f"⚠️ OpenVINO embedder failed: {emb_err}, falling back to default")
|
||||
|
||||
# Initialize tracker with embedder based on available parameters
|
||||
if embedder is None:
|
||||
print("⚠️ No embedder available, using DeepSORT with default tracking")
|
||||
else:
|
||||
print("✅ Initializing DeepSORT with custom embedder")
|
||||
|
||||
# Simple initialization without problematic parameters
|
||||
self.tracker = DeepSort(
|
||||
max_age=30,
|
||||
n_init=3,
|
||||
nn_budget=100,
|
||||
embedder=embedder
|
||||
)
|
||||
print("✅ DeepSORT tracker initialized")
|
||||
except ImportError:
|
||||
print("⚠️ DeepSORT not available")
|
||||
self.tracker = None
|
||||
print("✅ Models initialized successfully")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error initializing models: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
def _find_best_model_path(self, base_model_name: str = None) -> Optional[str]:
|
||||
"""
|
||||
Find best available model file in workspace.
|
||||
|
||||
Args:
|
||||
base_model_name: Base model name without extension
|
||||
|
||||
Returns:
|
||||
Path to model file or None
|
||||
"""
|
||||
# Select model based on device if base_model_name is not specified
|
||||
if base_model_name is None:
|
||||
device = self.config["detection"].get("device", "AUTO")
|
||||
if device == "CPU" or device == "AUTO":
|
||||
# Use yolo11n for CPU - faster, lighter model
|
||||
base_model_name = "yolo11n"
|
||||
print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)")
|
||||
else:
|
||||
# Use yolo11x for GPU - larger model with better accuracy
|
||||
base_model_name = "yolo11x"
|
||||
print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)")
|
||||
|
||||
# Check if the openvino_models directory exists in the current working directory
|
||||
cwd_openvino_dir = Path.cwd() / "openvino_models"
|
||||
if cwd_openvino_dir.exists():
|
||||
direct_path = cwd_openvino_dir / f"{base_model_name}.xml"
|
||||
if direct_path.exists():
|
||||
print(f"✅ Found model directly in CWD: {direct_path}")
|
||||
return str(direct_path.absolute())
|
||||
|
||||
# Check for absolute path to openvino_models (this is the most reliable)
|
||||
absolute_openvino_dir = Path("D:/Downloads/finale6/khatam/openvino_models")
|
||||
if absolute_openvino_dir.exists():
|
||||
direct_path = absolute_openvino_dir / f"{base_model_name}.xml"
|
||||
if direct_path.exists():
|
||||
print(f"✅ Found model at absolute path: {direct_path}")
|
||||
return str(direct_path.absolute())
|
||||
|
||||
# Try relative to the model_manager.py file
|
||||
openvino_models_dir = Path(__file__).parent.parent.parent / "openvino_models"
|
||||
direct_path = openvino_models_dir / f"{base_model_name}.xml"
|
||||
if direct_path.exists():
|
||||
print(f"✅ Found model in app directory: {direct_path}")
|
||||
return str(direct_path.absolute())
|
||||
|
||||
# Check for model in folder structure within openvino_models
|
||||
subfolder_path = openvino_models_dir / f"{base_model_name}_openvino_model" / f"{base_model_name}.xml"
|
||||
if subfolder_path.exists():
|
||||
print(f"✅ Found model in subfolder: {subfolder_path}")
|
||||
return str(subfolder_path.absolute())
|
||||
|
||||
# Try other common locations
|
||||
search_dirs = [
|
||||
".",
|
||||
"..",
|
||||
"../models",
|
||||
"../rcb",
|
||||
"../openvino_models",
|
||||
f"../{base_model_name}_openvino_model",
|
||||
"../..", # Go up to project root
|
||||
"../../openvino_models", # Project root / openvino_models
|
||||
]
|
||||
|
||||
model_extensions = [
|
||||
(f"{base_model_name}.xml", "OpenVINO IR direct"),
|
||||
(f"{base_model_name}_openvino_model/{base_model_name}.xml", "OpenVINO IR"),
|
||||
(f"{base_model_name}.pt", "PyTorch"),
|
||||
]
|
||||
|
||||
for search_dir in search_dirs:
|
||||
search_path = Path(__file__).parent.parent / search_dir
|
||||
if not search_path.exists():
|
||||
continue
|
||||
|
||||
for model_file, model_type in model_extensions:
|
||||
model_path = search_path / model_file
|
||||
if model_path.exists():
|
||||
print(f"✅ Found {model_type} model: {model_path}")
|
||||
return str(model_path.absolute())
|
||||
|
||||
print(f"❌ No model found for {base_model_name}")
|
||||
return None
|
||||
|
||||
def detect(self, frame: np.ndarray) -> List[Dict]:
|
||||
"""
|
||||
Detect objects in frame.
|
||||
|
||||
Args:
|
||||
frame: Input video frame
|
||||
|
||||
Returns:
|
||||
List of detection dictionaries
|
||||
"""
|
||||
if self.detector is None:
|
||||
print("WARNING: No detector available")
|
||||
return []
|
||||
try:
|
||||
# Use a lower confidence threshold for better visibility
|
||||
conf_threshold = max(0.3, self.config["detection"].get("confidence_threshold", 0.5))
|
||||
detections = self.detector.detect_vehicles(frame, conf_threshold=conf_threshold)
|
||||
|
||||
# Add debug output
|
||||
if detections:
|
||||
print(f"DEBUG: Detected {len(detections)} objects: " +
|
||||
", ".join([f"{d['class_name']} ({d['confidence']:.2f})" for d in detections[:3]]))
|
||||
|
||||
# Print bounding box coordinates of first detection
|
||||
if len(detections) > 0:
|
||||
print(f"DEBUG: First detection bbox: {detections[0]['bbox']}")
|
||||
else:
|
||||
print("DEBUG: No detections in this frame")
|
||||
|
||||
return detections
|
||||
except Exception as e:
|
||||
print(f"❌ Detection error: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return []
|
||||
|
||||
def update_tracking(self, detections: List[Dict], frame: np.ndarray) -> List[Dict]:
|
||||
"""
|
||||
Update tracking information for detections.
|
||||
|
||||
Args:
|
||||
detections: List of detections
|
||||
frame: Current video frame
|
||||
|
||||
Returns:
|
||||
Updated list of detections with tracking info
|
||||
"""
|
||||
if not self.tracker or not detections:
|
||||
return detections
|
||||
|
||||
try:
|
||||
# Format detections for DeepSORT
|
||||
tracker_dets = []
|
||||
for det in detections:
|
||||
if 'bbox' not in det:
|
||||
continue
|
||||
|
||||
bbox = det['bbox']
|
||||
if len(bbox) < 4:
|
||||
continue
|
||||
|
||||
x1, y1, x2, y2 = bbox
|
||||
w = x2 - x1
|
||||
h = y2 - y1
|
||||
|
||||
if w <= 0 or h <= 0:
|
||||
continue
|
||||
|
||||
conf = det.get('confidence', 0.0)
|
||||
class_name = det.get('class_name', 'unknown')
|
||||
tracker_dets.append(([x1, y1, w, h], conf, class_name))
|
||||
|
||||
# Update tracks
|
||||
if tracker_dets:
|
||||
tracks = self.tracker.update_tracks(tracker_dets, frame=frame)
|
||||
|
||||
# Associate tracks with detections
|
||||
for track in tracks:
|
||||
if not track.is_confirmed():
|
||||
continue
|
||||
|
||||
track_id = track.track_id
|
||||
ltrb = track.to_ltrb()
|
||||
|
||||
for det in detections:
|
||||
if 'bbox' not in det:
|
||||
continue
|
||||
|
||||
bbox = det['bbox']
|
||||
if len(bbox) < 4:
|
||||
continue
|
||||
|
||||
dx1, dy1, dx2, dy2 = bbox
|
||||
iou = bbox_iou((dx1, dy1, dx2, dy2), tuple(map(int, ltrb)))
|
||||
|
||||
if iou > 0.5:
|
||||
det['track_id'] = track_id
|
||||
break
|
||||
return detections
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Tracking error: {e}")
|
||||
return detections
|
||||
|
||||
def update_config(self, new_config: Dict):
|
||||
"""
|
||||
Update configuration parameters.
|
||||
|
||||
Args:
|
||||
new_config: New configuration dictionary
|
||||
"""
|
||||
if not new_config:
|
||||
return
|
||||
|
||||
# Store old device setting to check if it changed
|
||||
old_device = self.config["detection"].get("device", "AUTO") if "detection" in self.config else "AUTO"
|
||||
|
||||
# Update configuration
|
||||
for section in new_config:
|
||||
if section in self.config:
|
||||
self.config[section].update(new_config[section])
|
||||
else:
|
||||
self.config[section] = new_config[section]
|
||||
|
||||
# Check if device changed - if so, we need to reinitialize models
|
||||
new_device = self.config["detection"].get("device", "AUTO")
|
||||
device_changed = old_device != new_device
|
||||
|
||||
if device_changed:
|
||||
print(f"📢 Device changed from {old_device} to {new_device}, reinitializing models...")
|
||||
# Reinitialize models with new device
|
||||
self._initialize_models()
|
||||
return
|
||||
|
||||
# Just update detector confidence threshold if device didn't change
|
||||
if self.detector:
|
||||
conf_thres = self.config["detection"].get("confidence_threshold", 0.5)
|
||||
self.detector.conf_thres = conf_thres
|
||||
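# Usage sketch (illustrative, not part of the original file): the per-frame flow a controller
# typically drives. The zero frame is a placeholder; if no model file is found, detect()
# simply returns an empty list.
if __name__ == "__main__":
    manager = ModelManager(config_file=None)          # falls back to the built-in defaults
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # placeholder BGR frame
    detections = manager.detect(frame)                # list of dicts with 'bbox', 'confidence', ...
    detections = manager.update_tracking(detections, frame)  # adds 'track_id' where matched
    manager.update_config({"detection": {"confidence_threshold": 0.4}})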
686
qt_app_pyside1/controllers/enhanced_video_controller.py
Normal file
@@ -0,0 +1,686 @@
|
||||
"""
|
||||
Enhanced video controller with async inference and separated FPS tracking
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
from collections import deque
|
||||
from typing import Dict, List, Optional, Tuple, Any
|
||||
from pathlib import Path
|
||||
from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer
|
||||
from PySide6.QtGui import QImage, QPixmap
|
||||
|
||||
# Add parent directory to path for imports
|
||||
current_dir = Path(__file__).parent.parent.parent
|
||||
sys.path.append(str(current_dir))
|
||||
|
||||
# Import our async detector
|
||||
try:
|
||||
# Try direct import first
|
||||
from detection_openvino_async import OpenVINOVehicleDetector
|
||||
except ImportError:
|
||||
# Fall back to import from project root
|
||||
sys.path.append(str(Path(__file__).parent.parent.parent))
|
||||
from detection_openvino_async import OpenVINOVehicleDetector
|
||||
|
||||
# Import traffic light color detection utility
|
||||
try:
|
||||
from utils.traffic_light_utils import detect_traffic_light_color, draw_traffic_light_status
|
||||
print("✅ Imported traffic light color detection utilities")
|
||||
except ImportError:
|
||||
# Create simple placeholder functions if imports fail
|
||||
def detect_traffic_light_color(frame, bbox):
|
||||
return {"color": "unknown", "confidence": 0.0}
|
||||
|
||||
def draw_traffic_light_status(frame, bbox, color):
|
||||
return frame
|
||||
print("⚠️ Failed to import traffic light color detection utilities")
|
||||
|
||||
# Import utilities for visualization
|
||||
try:
|
||||
# Try the direct import when running inside the qt_app_pyside directory
|
||||
from utils.enhanced_annotation_utils import (
|
||||
enhanced_draw_detections,
|
||||
draw_performance_overlay,
|
||||
enhanced_cv_to_qimage,
|
||||
enhanced_cv_to_pixmap
|
||||
)
|
||||
print("✅ Successfully imported enhanced_annotation_utils from utils package")
|
||||
except ImportError:
|
||||
try:
|
||||
# Try fully qualified import path
|
||||
from qt_app_pyside.utils.enhanced_annotation_utils import (
|
||||
enhanced_draw_detections,
|
||||
draw_performance_overlay,
|
||||
enhanced_cv_to_qimage,
|
||||
enhanced_cv_to_pixmap
|
||||
)
|
||||
print("✅ Successfully imported enhanced_annotation_utils from qt_app_pyside.utils package")
|
||||
except ImportError:
|
||||
# Fall back to our minimal implementation
|
||||
print("⚠️ Could not import enhanced_annotation_utils, using fallback implementation")
|
||||
sys.path.append(str(Path(__file__).parent.parent.parent))
|
||||
try:
|
||||
from fallback_annotation_utils import (
|
||||
enhanced_draw_detections,
|
||||
draw_performance_overlay,
|
||||
enhanced_cv_to_qimage,
|
||||
enhanced_cv_to_pixmap
|
||||
)
|
||||
print("✅ Using fallback_annotation_utils")
|
||||
except ImportError:
|
||||
print("❌ CRITICAL: Could not import annotation utilities! UI will be broken.")
|
||||
# Define minimal stub functions to prevent crashes
|
||||
def enhanced_draw_detections(frame, detections, **kwargs):
|
||||
return frame
|
||||
def draw_performance_overlay(frame, metrics):
|
||||
return frame
|
||||
def enhanced_cv_to_qimage(frame):
|
||||
return None
|
||||
def enhanced_cv_to_pixmap(frame):
|
||||
return None
|
||||
|
||||
class AsyncVideoProcessingThread(QThread):
|
||||
"""Thread for async video processing with separate detection and UI threads."""
|
||||
|
||||
# Signal for UI update with enhanced metadata
|
||||
frame_processed = Signal(np.ndarray, list, dict) # frame, detections, metrics
|
||||
|
||||
# Signal for separate processing metrics
|
||||
stats_updated = Signal(dict) # All performance metrics
|
||||
|
||||
def __init__(self, model_manager, parent=None):
|
||||
super().__init__(parent)
|
||||
self.model_manager = model_manager
|
||||
self.running = False
|
||||
self.paused = False
|
||||
|
||||
# Video source
|
||||
self.source = 0
|
||||
self.cap = None
|
||||
self.source_fps = 0
|
||||
self.target_fps = 30 # Target FPS for UI updates
|
||||
|
||||
# Performance tracking
|
||||
self.detection_fps = 0
|
||||
self.ui_fps = 0
|
||||
self.frame_count = 0
|
||||
self.start_time = 0
|
||||
self.detection_times = deque(maxlen=30) # Last 30 detection times
|
||||
self.ui_frame_times = deque(maxlen=30) # Last 30 UI frame times
|
||||
self.last_ui_frame_time = 0
|
||||
|
||||
# Mutexes for thread safety
|
||||
self.mutex = QMutex()
|
||||
self.wait_condition = QWaitCondition()
|
||||
|
||||
# FPS limiter to avoid CPU overload
|
||||
self.last_frame_time = 0
|
||||
self.min_frame_interval = 1.0 / 60 # Max 60 FPS
|
||||
|
||||
# Async processing queue with frame IDs
|
||||
self.frame_queue = [] # List of (frame_id, frame) tuples
|
||||
self.next_frame_id = 0
|
||||
self.processed_frames = {} # frame_id -> (frame, detections, metrics)
|
||||
self.last_emitted_frame_id = -1
|
||||
# Separate UI thread timer for smooth display
|
||||
self.ui_timer = QTimer()
|
||||
self.ui_timer.timeout.connect(self._emit_next_frame)
|
||||
|
||||
def set_source(self, source):
|
||||
"""Set video source - camera index or file path."""
|
||||
print(f"[AsyncThread] set_source: {source} ({type(source)})")
|
||||
if source is None:
|
||||
self.source = 0
|
||||
elif isinstance(source, str) and os.path.isfile(source):
|
||||
self.source = source
|
||||
elif isinstance(source, int):
|
||||
self.source = source
|
||||
else:
|
||||
print("[AsyncThread] Invalid source, defaulting to camera")
|
||||
self.source = 0
|
||||
|
||||
def start_processing(self):
|
||||
"""Start video processing."""
|
||||
self.running = True
|
||||
self.start()
|
||||
# Start UI timer for smooth frame emission
|
||||
self.ui_timer.start(int(1000 / self.target_fps))
|
||||
|
||||
def stop_processing(self):
|
||||
"""Stop video processing."""
|
||||
self.running = False
|
||||
self.wait_condition.wakeAll()
|
||||
self.wait()
|
||||
self.ui_timer.stop()
|
||||
if self.cap:
|
||||
self.cap.release()
|
||||
self.cap = None
|
||||
|
||||
def pause_processing(self):
|
||||
"""Pause video processing."""
|
||||
self.mutex.lock()
|
||||
self.paused = True
|
||||
self.mutex.unlock()
|
||||
|
||||
def resume_processing(self):
|
||||
"""Resume video processing."""
|
||||
self.mutex.lock()
|
||||
self.paused = False
|
||||
self.wait_condition.wakeAll()
|
||||
self.mutex.unlock()
|
||||
|
||||
def run(self):
|
||||
"""Main thread execution loop."""
|
||||
self._initialize_video()
|
||||
self.start_time = time.time()
|
||||
self.frame_count = 0
|
||||
|
||||
while self.running:
|
||||
# Check if paused
|
||||
self.mutex.lock()
|
||||
if self.paused:
|
||||
self.wait_condition.wait(self.mutex)
|
||||
self.mutex.unlock()
|
||||
|
||||
if not self.running:
|
||||
break
|
||||
|
||||
# Control frame rate
|
||||
current_time = time.time()
|
||||
time_diff = current_time - self.last_frame_time
|
||||
if time_diff < self.min_frame_interval:
|
||||
time.sleep(self.min_frame_interval - time_diff)
|
||||
|
||||
# Read frame
|
||||
ret, frame = self.cap.read()
|
||||
self.last_frame_time = time.time()
|
||||
|
||||
if not ret or frame is None:
|
||||
print("End of video or failed to read frame")
|
||||
# Check if we're using a file and should restart
|
||||
if isinstance(self.source, str) and os.path.isfile(self.source):
|
||||
self._initialize_video() # Restart video
|
||||
continue
|
||||
else:
|
||||
break
|
||||
|
||||
# Process frame asynchronously
|
||||
self._process_frame_async(frame)
|
||||
|
||||
# Update frame counter
|
||||
self.frame_count += 1
|
||||
|
||||
# Clean up when thread exits
|
||||
if self.cap:
|
||||
self.cap.release()
|
||||
self.cap = None
|
||||
|
||||
def _initialize_video(self):
|
||||
"""Initialize video source."""
|
||||
try:
|
||||
if self.cap:
|
||||
self.cap.release()
|
||||
|
||||
print(f"[EnhancedVideoController] _initialize_video: self.source = {self.source} (type: {type(self.source)})")
|
||||
# Only use camera if source is int or '0', else use file path
|
||||
if isinstance(self.source, int):
|
||||
self.cap = cv2.VideoCapture(self.source)
|
||||
elif isinstance(self.source, str) and os.path.isfile(self.source):
|
||||
self.cap = cv2.VideoCapture(self.source)
|
||||
else:
|
||||
print(f"[EnhancedVideoController] Invalid source: {self.source}, not opening VideoCapture.")
|
||||
return False
|
||||
|
||||
if not self.cap.isOpened():
|
||||
print(f"Failed to open video source: {self.source}")
|
||||
return False
|
||||
|
||||
# Get source FPS
|
||||
self.source_fps = self.cap.get(cv2.CAP_PROP_FPS)
|
||||
if self.source_fps <= 0:
|
||||
self.source_fps = 30 # Default fallback
|
||||
|
||||
print(f"Video source initialized: {self.source}, FPS: {self.source_fps}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error initializing video: {e}")
|
||||
return False
|
||||
|
||||
def _process_frame_async(self, frame):
|
||||
"""Process a frame with async detection."""
|
||||
try:
|
||||
# Start detection timer
|
||||
detection_start = time.time()
|
||||
|
||||
# Assign frame ID
|
||||
frame_id = self.next_frame_id
|
||||
self.next_frame_id += 1
|
||||
|
||||
# Get detector and start async inference
|
||||
detector = self.model_manager.detector
|
||||
|
||||
# Check if detector supports async API
|
||||
if hasattr(detector, 'detect_async_start'):
|
||||
# Use async API
|
||||
inf_frame_id = detector.detect_async_start(frame)
|
||||
|
||||
# Store frame in queue with the right ID
|
||||
self.mutex.lock()
|
||||
self.frame_queue.append((frame_id, frame, inf_frame_id))
|
||||
self.mutex.unlock()
|
||||
|
||||
# Try getting results from previous frames
|
||||
self._check_async_results()
|
||||
|
||||
else:
|
||||
# Fallback to synchronous API
|
||||
detections = self.model_manager.detect(frame)
|
||||
|
||||
# Calculate detection time
|
||||
detection_time = time.time() - detection_start
|
||||
self.detection_times.append(detection_time)
|
||||
|
||||
# Update detection FPS
|
||||
elapsed = time.time() - self.start_time
|
||||
if elapsed > 0:
|
||||
self.detection_fps = self.frame_count / elapsed
|
||||
|
||||
# Calculate detection metrics
|
||||
detection_ms = detection_time * 1000
|
||||
avg_detection_ms = np.mean(self.detection_times) * 1000
|
||||
|
||||
# Store metrics
|
||||
metrics = {
|
||||
'detection_fps': self.detection_fps,
|
||||
'detection_ms': detection_ms,
|
||||
'avg_detection_ms': avg_detection_ms,
|
||||
'frame_id': frame_id
|
||||
}
|
||||
|
||||
# Store processed frame
|
||||
self.mutex.lock()
|
||||
self.processed_frames[frame_id] = (frame, detections, metrics)
|
||||
self.mutex.unlock()
|
||||
|
||||
# Emit stats update
|
||||
self.stats_updated.emit(metrics)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error in frame processing: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
    def _check_async_results(self):
        """Check for completed async inference requests."""
        try:
            detector = self.model_manager.detector
            if not hasattr(detector, 'detect_async_get_result'):
                return

            # Get any frames waiting for results
            self.mutex.lock()
            queue_copy = self.frame_queue.copy()
            self.mutex.unlock()

            processed_frames = []

            # Check each frame in the queue
            for frame_id, frame, inf_frame_id in queue_copy:
                # Try to get results without waiting
                detections = detector.detect_async_get_result(inf_frame_id, wait=False)

                # If results are ready
                if detections is not None:
                    # Calculate the per-request detection time (0 if the request is gone)
                    if inf_frame_id in detector.active_requests:
                        detection_time = time.time() - detector.active_requests[inf_frame_id][2]
                    else:
                        detection_time = 0
                    self.detection_times.append(detection_time)

                    # Update detection FPS
                    elapsed = time.time() - self.start_time
                    if elapsed > 0:
                        self.detection_fps = self.frame_count / elapsed

                    # Calculate metrics
                    detection_ms = detection_time * 1000
                    avg_detection_ms = np.mean(self.detection_times) * 1000

                    # Store metrics
                    metrics = {
                        'detection_fps': self.detection_fps,
                        'detection_ms': detection_ms,
                        'avg_detection_ms': avg_detection_ms,
                        'frame_id': frame_id
                    }

                    # Store processed frame
                    self.mutex.lock()
                    self.processed_frames[frame_id] = (frame, detections, metrics)
                    processed_frames.append(frame_id)
                    self.mutex.unlock()

                    # Emit stats update
                    self.stats_updated.emit(metrics)

            # Remove processed frames from queue
            if processed_frames:
                self.mutex.lock()
                self.frame_queue = [item for item in self.frame_queue
                                    if item[0] not in processed_frames]
                self.mutex.unlock()

        except Exception as e:
            print(f"Error checking async results: {e}")
            import traceback
            traceback.print_exc()

    def _emit_next_frame(self):
        """Emit the next processed frame to UI at a controlled rate."""
        try:
            # Update UI FPS calculation
            current_time = time.time()
            if self.last_ui_frame_time > 0:
                ui_frame_time = current_time - self.last_ui_frame_time
                self.ui_frame_times.append(ui_frame_time)
                self.ui_fps = 1.0 / ui_frame_time if ui_frame_time > 0 else 0
            self.last_ui_frame_time = current_time

            # Check async results first
            self._check_async_results()

            # Find the next frame to emit
            self.mutex.lock()
            available_frames = sorted(self.processed_frames.keys())
            self.mutex.unlock()

            if not available_frames:
                return

            next_frame_id = available_frames[0]

            # Get the frame data
            self.mutex.lock()
            frame, detections, metrics = self.processed_frames.pop(next_frame_id)
            self.mutex.unlock()

            # Add UI FPS to metrics
            metrics['ui_fps'] = self.ui_fps

            # Apply tracking if available
            if self.model_manager.tracker:
                detections = self.model_manager.update_tracking(detections, frame)

            # Emit the frame to the UI
            self.frame_processed.emit(frame, detections, metrics)

            # Store as last emitted frame
            self.last_emitted_frame_id = next_frame_id

        except Exception as e:
            print(f"Error emitting frame: {e}")
            import traceback
            traceback.print_exc()

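
# --- Illustrative sketch (not part of the original module) -------------------
# The async path in _process_frame_async()/_check_async_results() only assumes
# that the detector exposes detect_async_start(frame) -> request id and
# detect_async_get_result(id, wait=False) -> detections or None, plus an
# active_requests dict whose values keep the request start time at index 2.
# The thread-based facade below is a minimal sketch of that contract; the real
# OpenVINO detector in this project may be implemented differently.
import threading


class _AsyncDetectorSketch:
    """Minimal non-blocking facade around a synchronous detection callable."""

    def __init__(self, detect_fn):
        self._detect_fn = detect_fn       # synchronous detector, e.g. model_manager.detect
        self._next_id = 0
        self.active_requests = {}         # request id -> (thread, result holder, start time)

    def detect_async_start(self, frame):
        """Launch detection in a worker thread and return a request id."""
        req_id = self._next_id
        self._next_id += 1
        holder = {}

        def _run():
            holder['detections'] = self._detect_fn(frame)

        worker = threading.Thread(target=_run, daemon=True)
        self.active_requests[req_id] = (worker, holder, time.time())
        worker.start()
        return req_id

    def detect_async_get_result(self, req_id, wait=True):
        """Return detections once ready; None if wait=False and still running."""
        entry = self.active_requests.get(req_id)
        if entry is None:
            return None
        worker, holder, _start = entry
        if wait:
            worker.join()
        if worker.is_alive():
            return None
        self.active_requests.pop(req_id, None)
        return holder.get('detections', [])
# ------------------------------------------------------------------------------
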
class EnhancedVideoController(QObject):
    """
    Enhanced video controller with better file handling and statistics.
    """
    # Define signals; signatures match the arguments emitted in
    # _on_frame_processed() and _on_stats_updated() below.
    frame_ready = Signal(QPixmap, list, dict)         # Annotated frame for display, detections, metrics
    frame_np_ready = Signal(np.ndarray)               # Frame as NumPy array
    raw_frame_ready = Signal(np.ndarray, list, dict)  # Raw frame with detections and metrics
    stats_ready = Signal(dict)                        # Performance stats (fps, detection_time, ...)

    def __init__(self, model_manager=None):
        """Initialize the video controller"""
        super().__init__()

        # Input source
        self._source = 0  # Default to camera 0
        self._source_type = "camera"
        self._running = False
        # Track the most recent traffic light color seen in the stream
        self._last_traffic_light_color = "unknown"

        # Regular Controller instance variables
        self.model_manager = model_manager
        self.processing_thread = None
        self.show_annotations = True
        self.show_fps = True
        self.save_video = False
        self.video_writer = None

    def set_source(self, source):
        """Set video source - camera index or file path."""
        print(f"[EnhancedVideoController] set_source: {source} ({type(source)})")
        # Remember the source so it can be applied to a thread created later in start()
        self._source = source
        if self.processing_thread:
            self.processing_thread.set_source(source)

    def start(self):
        """Start video processing."""
        if self.processing_thread and self.processing_thread.running:
            return

        # Create new processing thread
        self.processing_thread = AsyncVideoProcessingThread(self.model_manager)

        # Apply the currently selected source before starting
        self.processing_thread.set_source(self._source)

        # Connect signals
        self.processing_thread.frame_processed.connect(self._on_frame_processed)
        self.processing_thread.stats_updated.connect(self._on_stats_updated)

        # Start processing
        self.processing_thread.start_processing()

    def stop(self):
        """Stop video processing."""
        if self.processing_thread:
            self.processing_thread.stop_processing()
            self.processing_thread = None

        if self.video_writer:
            self.video_writer.release()
            self.video_writer = None

    def pause(self):
        """Pause video processing."""
        if self.processing_thread:
            self.processing_thread.pause_processing()

    def resume(self):
        """Resume video processing."""
        if self.processing_thread:
            self.processing_thread.resume_processing()

    def toggle_annotations(self, enabled):
        """Toggle annotations on/off."""
        self.show_annotations = enabled

    def toggle_fps_display(self, enabled):
        """Toggle FPS display on/off."""
        self.show_fps = enabled

    def start_recording(self, output_path, frame_size=(640, 480), fps=30):
        """Start recording video to file."""
        self.save_video = True
        # XVID is an AVI codec; pair it with an .avi output path
        # (use e.g. 'mp4v' for .mp4 files).
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.video_writer = cv2.VideoWriter(
            output_path, fourcc, fps,
            (frame_size[0], frame_size[1])
        )

    def stop_recording(self):
        """Stop recording video."""
        self.save_video = False
        if self.video_writer:
            self.video_writer.release()
            self.video_writer = None

    def _on_frame_processed(self, frame, detections, metrics):
        """Handle processed frame from the worker thread."""
        try:
            # Create a copy of the frame for annotation
            display_frame = frame.copy()

            # Apply annotations if enabled
            if self.show_annotations and detections:
                display_frame = enhanced_draw_detections(display_frame, detections)

                # Detect and annotate traffic light colors
                for detection in detections:
                    # Check for both class_id 9 (COCO) and any other traffic light classes
                    if detection.get('class_id') == 9 or detection.get('class_name') == 'traffic light':
                        bbox = detection.get('bbox')
                        if not bbox:
                            continue

                        # Get traffic light color
                        color = detect_traffic_light_color(frame, bbox)
                        # Store the latest traffic light color
                        self._last_traffic_light_color = color
                        # Draw traffic light status
                        display_frame = draw_traffic_light_status(display_frame, bbox, color)
                        print(f"🚦 Traffic light detected with color: {color}")

            # Add FPS counter if enabled
            if self.show_fps:
                # Add both detection and UI FPS
                detection_fps = metrics.get('detection_fps', 0)
                ui_fps = metrics.get('ui_fps', 0)
                detection_ms = metrics.get('avg_detection_ms', 0)

                display_frame = draw_performance_overlay(
                    display_frame,
                    {
                        "Detection FPS": f"{detection_fps:.1f}",
                        "UI FPS": f"{ui_fps:.1f}",
                        "Inference": f"{detection_ms:.1f} ms"
                    }
                )

            # Save frame if recording
            if self.save_video and self.video_writer:
                self.video_writer.write(display_frame)

            # Convert to QPixmap for display
            pixmap = enhanced_cv_to_pixmap(display_frame)

            # Emit signals
            self.frame_ready.emit(pixmap, detections, metrics)
            self.raw_frame_ready.emit(frame, detections, metrics)
            # Emit numpy frame for compatibility with existing connections
            self.frame_np_ready.emit(frame)

        except Exception as e:
            print(f"Error processing frame: {e}")
            import traceback
            traceback.print_exc()

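    # --- Illustrative sketch (not part of the original module) ---------------
    # detect_traffic_light_color() is imported from the project's utilities and
    # is not defined in this file. The helper below only sketches a common
    # HSV-threshold approach to classifying a traffic-light crop; its name and
    # thresholds are assumptions, not the project's actual implementation.
    @staticmethod
    def _classify_light_color_sketch(frame, bbox):
        """Return 'red', 'yellow', 'green' or 'unknown' for a traffic-light box."""
        x1, y1, x2, y2 = [max(int(c), 0) for c in bbox]
        crop = frame[y1:y2, x1:x2]
        if crop.size == 0:
            return "unknown"
        hsv = cv2.cvtColor(crop, cv2.COLOR_BGR2HSV)
        # OpenCV hue spans 0-179; red wraps around 0, so it needs two ranges.
        masks = {
            "red": cv2.inRange(hsv, (0, 100, 100), (10, 255, 255))
                   | cv2.inRange(hsv, (160, 100, 100), (179, 255, 255)),
            "yellow": cv2.inRange(hsv, (18, 100, 100), (35, 255, 255)),
            "green": cv2.inRange(hsv, (40, 100, 100), (90, 255, 255)),
        }
        counts = {name: int(cv2.countNonZero(mask)) for name, mask in masks.items()}
        best = max(counts, key=counts.get)
        return best if counts[best] > 0 else "unknown"
    # --------------------------------------------------------------------------
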
    def _on_stats_updated(self, stats):
        """Handle updated statistics from the worker thread."""
        try:
            # Create a proper stats dictionary for the LiveTab
            ui_stats = {
                'fps': stats.get('detection_fps', 0.0),
                'detection_time': stats.get('avg_detection_ms', 0.0),
                'traffic_light_color': self._last_traffic_light_color
            }
            print(f"Emitting stats: {ui_stats}")
            # Emit as a dictionary - fixed signal/slot mismatch
            self.stats_ready.emit(ui_stats)
        except Exception as e:
            print(f"Error in stats update: {e}")
            import traceback
            traceback.print_exc()

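    # --- Illustrative sketch (not part of the original module) ---------------
    # enhanced_cv_to_pixmap() used above comes from the project's UI utilities.
    # The helper below only sketches the usual BGR ndarray -> QPixmap conversion
    # so the data flow is clear; the name is an assumption, not the project's API.
    @staticmethod
    def _cv_to_pixmap_sketch(frame_bgr):
        """Convert a BGR OpenCV frame to a QPixmap."""
        from PySide6.QtGui import QImage  # local import keeps the sketch self-contained

        rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
        height, width, channels = rgb.shape
        image = QImage(rgb.data, width, height, channels * width, QImage.Format_RGB888)
        # Copy so the pixmap owns its buffer after the ndarray goes out of scope.
        return QPixmap.fromImage(image.copy())
    # --------------------------------------------------------------------------
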
    def _process_frame_for_display(self, frame, detections, metrics=None):
        """Process a frame for display, adding annotations."""
        try:
            # Create a copy for display
            display_frame = frame.copy()

            # Process traffic light detections to identify colors
            for det in detections:
                if det.get('class_name') == 'traffic light':
                    # Get traffic light color
                    bbox = det['bbox']
                    light_color = detect_traffic_light_color(frame, bbox)

                    # Add color information to detection
                    det['traffic_light_color'] = light_color

                    # Store the latest traffic light color
                    self._last_traffic_light_color = light_color

                    # Use specialized drawing for traffic lights
                    display_frame = draw_traffic_light_status(display_frame, bbox, light_color)

                    print(f"🚦 Traffic light detected with color: {light_color}")
                else:
                    # Draw regular detection box
                    bbox = det['bbox']
                    x1, y1, x2, y2 = [int(c) for c in bbox]
                    class_name = det.get('class_name', 'object')
                    confidence = det.get('confidence', 0.0)

                    label = f"{class_name} {confidence:.2f}"
                    color = (0, 255, 0)  # Green for other objects

                    cv2.rectangle(display_frame, (x1, y1), (x2, y2), color, 2)
                    cv2.putText(display_frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)

            # Add tracker visualization if tracking is enabled
            # (getattr avoids an AttributeError when no tracker is attached)
            if getattr(self, 'tracker', None) and hasattr(self, 'visualization_tracks'):
                # Draw current tracks
                for track_id, track_info in self.visualization_tracks.items():
                    track_box = track_info.get('box')
                    if track_box:
                        x1, y1, x2, y2 = [int(c) for c in track_box]
                        track_class = track_info.get('class_name', 'tracked')

                        # Draw track ID and class
                        cv2.rectangle(display_frame, (x1, y1), (x2, y2), (255, 0, 255), 2)
                        cv2.putText(display_frame, f"{track_class} #{track_id}",
                                    (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)

                        # Draw trail if available
                        trail = track_info.get('trail', [])
                        if len(trail) > 1:
                            for i in range(1, len(trail)):
                                cv2.line(display_frame,
                                         (int(trail[i - 1][0]), int(trail[i - 1][1])),
                                         (int(trail[i][0]), int(trail[i][1])),
                                         (255, 0, 255), 2)

            # Add FPS counter if enabled (metrics may be None when not provided)
            if self.show_fps and metrics:
                # Add both detection and UI FPS
                detection_fps = metrics.get('detection_fps', 0)
                ui_fps = metrics.get('ui_fps', 0)
                detection_ms = metrics.get('avg_detection_ms', 0)

                display_frame = draw_performance_overlay(
                    display_frame,
                    {
                        "Detection FPS": f"{detection_fps:.1f}",
                        "UI FPS": f"{ui_fps:.1f}",
                        "Inference": f"{detection_ms:.1f} ms"
                    }
                )

            # Save frame if recording
            if self.save_video and self.video_writer:
                self.video_writer.write(display_frame)

            # Convert to QPixmap for display
            pixmap = enhanced_cv_to_pixmap(display_frame)

            # Emit signals (fall back to an empty metrics dict when none was given)
            self.frame_ready.emit(pixmap, detections, metrics or {})
            self.raw_frame_ready.emit(frame, detections, metrics or {})
            # Emit numpy frame for compatibility with existing connections
            self.frame_np_ready.emit(frame)

        except Exception as e:
            print(f"Error processing frame: {e}")
            import traceback
            traceback.print_exc()
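
# --- Illustrative usage sketch (not part of the original module) -------------
# One way the controller could be wired into a PySide6 window. Only the
# EnhancedVideoController API defined above is taken from this file; the
# model_manager, video_label and stats_panel objects are assumptions standing
# in for whatever the application provides.
#
#     controller = EnhancedVideoController(model_manager)
#     controller.frame_ready.connect(lambda pixmap, dets, metrics: video_label.setPixmap(pixmap))
#     controller.stats_ready.connect(stats_panel.update_stats)
#     controller.set_source("videos/intersection.mp4")  # or a camera index such as 0
#     controller.start()
#     ...
#     controller.stop()
# ------------------------------------------------------------------------------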