Clean push: Removed heavy files & added only latest snapshot
10 qt_app_pyside1/.dockerignore (Normal file)
@@ -0,0 +1,10 @@
__pycache__/
*.pyc
*.pyo
.vscode/
.env
.git/
logs/
dist/
build/
*.spec
BIN qt_app_pyside1/Checkpoints/best_deeplabv3plus_mobilenet_cityscapes_os16.pth (LFS, Normal file)
Binary file not shown.
38 qt_app_pyside1/Dockerfile (Normal file)
@@ -0,0 +1,38 @@
# Dockerfile for qt_app_pyside1 (optimized)
FROM python:3.10-slim

# Install system dependencies for OpenCV, PySide6, OpenVINO, X11 GUI, and supervisor
RUN apt-get update && apt-get install -y \
    ffmpeg \
    libgl1 \
    libegl1 \
    libglib2.0-0 \
    libsm6 \
    libxrender1 \
    libxext6 \
    xvfb \
    x11-apps \
    supervisor \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy requirements and install dependencies first for caching
COPY requirements_enhanced.txt ./requirements_enhanced.txt
RUN pip install --no-cache-dir -r requirements_enhanced.txt

# Copy all source code and models
COPY . .

# Copy supervisor config
COPY supervisord.conf /etc/supervisord.conf

# Make start.sh executable
RUN chmod +x start.sh

# Expose display for X11 and logs
ENV DISPLAY=:99
VOLUME ["/app/logs"]

# Use supervisor to run Xvfb and app together, with logging
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]
38 qt_app_pyside1/FixedDebug.spec (Normal file)
@@ -0,0 +1,38 @@
# -*- mode: python ; coding: utf-8 -*-


a = Analysis(
    ['main.py'],
    pathex=[],
    binaries=[],
    datas=[('ui', 'ui'), ('controllers', 'controllers'), ('utils', 'utils'), ('config.json', '.'), ('splash.py', '.')],
    hiddenimports=['ui', 'ui.main_window', 'controllers', 'utils', 'cv2', 'openvino', 'numpy', 'PySide6.QtCore', 'PySide6.QtWidgets', 'PySide6.QtGui'],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    noarchive=False,
    optimize=0,
)
pyz = PYZ(a.pure)

exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.datas,
    [],
    name='FixedDebug',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)
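This spec, like the other *.spec files added in this commit, is consumed by PyInstaller directly. As a minimal illustration using only the standard PyInstaller Python entry point (nothing repo-specific is assumed beyond the spec file name):

```
# Build the executable from an existing spec file; equivalent to
# running `pyinstaller --clean --noconfirm FixedDebug.spec`.
import PyInstaller.__main__

PyInstaller.__main__.run([
    "--clean",          # clear PyInstaller's build caches first
    "--noconfirm",      # overwrite the dist/ output without prompting
    "FixedDebug.spec",  # any of the spec files added here works the same way
])
```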
36 qt_app_pyside1/QUICK_ACTION_PLAN.txt (Normal file)
@@ -0,0 +1,36 @@
"""
🚀 QUICK ACTION PLAN - Fix PyInstaller Build Issues
==================================================

WHAT I'VE DONE FOR YOU:
✅ Created missing __init__.py files in ui/ and controllers/
✅ Created build_exe_optimized.py with ALL fixes
✅ Analyzed your build log and identified all critical errors

IMMEDIATE NEXT STEPS:
1. Run the optimized build script:
   python build_exe_optimized.py

2. If the build succeeds, test the executable:
   dist\TrafficMonitoringApp.exe

KEY FIXES APPLIED:
- Missing __init__.py files (CRITICAL ERROR FIX)
- Complete hidden import coverage for cv2, numpy, openvino, etc.
- Excluded heavy unused modules (50MB+ size reduction)
- Proper data file inclusion
- Windows-specific optimizations

WHAT TO EXPECT:
- Build should complete successfully now
- Executable size ~200MB (down from 300MB+)
- All UI components should load
- Video processing should work
- Configuration loading should work

IF ISSUES PERSIST:
1. Check Python version (3.8-3.11 recommended)
2. Verify all packages are installed: pip install -r requirements.txt
3. Clear the cache: python -m pip cache purge
4. Run in a clean virtual environment
"""
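build_exe_optimized.py itself is not included in this commit, so the following is only a hedged sketch of the kind of invocation the plan describes. The entry point, data files and hidden imports are taken from the plan and the spec files above; the specific excluded modules are assumptions:

```
# Hypothetical sketch of build_exe_optimized.py (not the real script).
from pathlib import Path
import PyInstaller.__main__

# Fix the critical error called out in the plan: packages without
# __init__.py are invisible to PyInstaller's module collection.
for pkg in ("ui", "controllers"):
    (Path(pkg) / "__init__.py").touch(exist_ok=True)

PyInstaller.__main__.run([
    "main.py",
    "--name=TrafficMonitoringApp",
    "--noconfirm",
    # Data files bundled alongside the executable (';' is the Windows separator).
    "--add-data=config.json;.",
    "--add-data=ui;ui",
    "--add-data=controllers;controllers",
    "--add-data=utils;utils",
    # Hidden imports that static analysis tends to miss.
    "--hidden-import=cv2",
    "--hidden-import=numpy",
    "--hidden-import=openvino",
    "--hidden-import=PySide6.QtCore",
    "--hidden-import=PySide6.QtWidgets",
    "--hidden-import=PySide6.QtGui",
    # Assumed examples of the "heavy unused modules" excluded for size.
    "--exclude-module=tensorflow",
    "--exclude-module=torch",
    "--exclude-module=matplotlib",
])
```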
38 qt_app_pyside1/QuickDebug.spec (Normal file)
@@ -0,0 +1,38 @@
# -*- mode: python ; coding: utf-8 -*-


a = Analysis(
    ['main.py'],
    pathex=[],
    binaries=[],
    datas=[],
    hiddenimports=[],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    noarchive=False,
    optimize=0,
)
pyz = PYZ(a.pure)

exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.datas,
    [],
    name='QuickDebug',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)
74 qt_app_pyside1/README.md (Normal file)
@@ -0,0 +1,74 @@
# PySide6 Traffic Monitoring Dashboard (Advanced)

## Features

- Real-time video detection (OpenVINO, YOLO)
- Drag-and-drop video/image, webcam, RTSP
- Live overlays (bounding boxes, labels, violations)
- Analytics: trends, histograms, summary cards
- Violations: searchable, filterable, snapshot preview
- Export: CSV/JSON, config editor, reload/apply
- Sidebar: device, thresholds, toggles, dark/light mode
- Performance overlay: CPU, RAM, FPS, backend
- Modern UI: QSS, icons, rounded corners, animations

## Structure

```
qt_app_pyside/
├── main.py
├── ui/
│   ├── main_window.py
│   ├── live_tab.py
│   ├── analytics_tab.py
│   ├── violations_tab.py
│   ├── export_tab.py
│   └── config_panel.py
├── controllers/
│   ├── video_controller.py
│   ├── analytics_controller.py
│   └── performance_overlay.py
├── utils/
│   ├── helpers.py
│   └── annotation_utils.py
├── resources/
│   ├── icons/
│   ├── style.qss
│   └── themes/
│       ├── dark.qss
│       └── light.qss
├── config.json
├── requirements.txt
```

## Usage

1. Install requirements: `pip install -r requirements.txt`

2. Run the application (several options):
   - **Recommended**: Use the enhanced controller: `python run_app.py`
   - Standard mode: `python main.py`

## Enhanced Features

The application now includes an enhanced video controller that is automatically activated at startup:

- ✅ **Async Inference Pipeline**: Better frame rate and responsiveness
- ✅ **FP16 Precision**: Optimized for CPU performance
- ✅ **Separate FPS Tracking**: UI and detection metrics are tracked separately
- ✅ **Auto Model Selection**: Uses optimal model based on device (yolo11n for CPU, yolo11x for GPU); a sketch follows below
- ✅ **OpenVINO Embedder**: Optimized DeepSORT tracking with OpenVINO backend
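A minimal sketch of the auto model selection described above, assuming an OpenVINO device query (the helper name is illustrative, not something from this repo):

```
# Illustrative only: choose the model the README describes for each device type.
from openvino import Core

def pick_model(requested_device: str = "AUTO") -> str:
    core = Core()
    has_gpu = any(d.startswith("GPU") for d in core.available_devices)
    use_gpu = requested_device.startswith("GPU") or (requested_device == "AUTO" and has_gpu)
    # yolo11x is heavier and benefits from a GPU; yolo11n keeps CPU inference responsive.
    return "yolo11x" if use_gpu else "yolo11n"

print(pick_model())  # 'yolo11n' on a CPU-only machine
```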
## Integration

- Plug in your detection logic from `detection_openvino.py` and `violation_openvino.py` in the controllers.
- Use `config.json` for all parameters.
- Extend UI/controllers for advanced analytics, export, and overlays.

## Troubleshooting

If you encounter import errors:

- Try running with `python run_app.py`, which handles import paths automatically (a sketch of that bootstrap follows below)
- Ensure you have all required dependencies installed
- Check that the correct model files exist in the openvino_models directory
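run_app.py itself is not part of this commit, so the following is only an assumed sketch of the import-path bootstrapping the README attributes to it:

```
# Hypothetical sketch of run_app.py (the real script is not shown in this commit).
import runpy
import sys
from pathlib import Path

APP_DIR = Path(__file__).resolve().parent

# Make `ui`, `controllers`, and `utils` importable regardless of the
# directory the app is launched from.
if str(APP_DIR) not in sys.path:
    sys.path.insert(0, str(APP_DIR))

if __name__ == "__main__":
    # Hand off to the standard entry point, executing main.py as a script.
    runpy.run_path(str(APP_DIR / "main.py"), run_name="__main__")
```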
38 qt_app_pyside1/TrafficMonitor.spec (Normal file)
@@ -0,0 +1,38 @@
# -*- mode: python ; coding: utf-8 -*-


a = Analysis(
    ['main.py'],
    pathex=[],
    binaries=[],
    datas=[('ui', 'ui'), ('controllers', 'controllers'), ('utils', 'utils'), ('openvino_models', 'openvino_models'), ('resources', 'resources'), ('config.json', '.'), ('splash.py', '.')],
    hiddenimports=['cv2', 'openvino', 'numpy', 'PySide6.QtCore', 'PySide6.QtWidgets', 'PySide6.QtGui', 'json', 'os', 'sys', 'time', 'traceback', 'pathlib'],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    noarchive=False,
    optimize=0,
)
pyz = PYZ(a.pure)

exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.datas,
    [],
    name='TrafficMonitor',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=False,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)
38 qt_app_pyside1/TrafficMonitorDebug.spec (Normal file)
@@ -0,0 +1,38 @@
# -*- mode: python ; coding: utf-8 -*-


a = Analysis(
    ['main.py'],
    pathex=[],
    binaries=[],
    datas=[('ui', 'ui'), ('controllers', 'controllers'), ('utils', 'utils'), ('openvino_models', 'openvino_models'), ('resources', 'resources'), ('config.json', '.'), ('splash.py', '.')],
    hiddenimports=['cv2', 'openvino', 'numpy', 'PySide6.QtCore', 'PySide6.QtWidgets', 'PySide6.QtGui', 'json', 'os', 'sys', 'time', 'traceback', 'pathlib'],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    noarchive=False,
    optimize=0,
)
pyz = PYZ(a.pure)

exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.datas,
    [],
    name='TrafficMonitorDebug',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)
38 qt_app_pyside1/TrafficMonitorFixed.spec (Normal file)
@@ -0,0 +1,38 @@
# -*- mode: python ; coding: utf-8 -*-


a = Analysis(
    ['main.py'],
    pathex=[],
    binaries=[],
    datas=[('ui', 'ui'), ('splash.py', '.'), ('config.json', '.'), ('controllers', 'controllers'), ('utils', 'utils'), ('openvino_models', 'openvino_models')],
    hiddenimports=['json', 'datetime', 'pathlib', 'os', 'sys', 'time', 'traceback'],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    noarchive=False,
    optimize=0,
)
pyz = PYZ(a.pure)

exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.datas,
    [],
    name='TrafficMonitorFixed',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)
0 qt_app_pyside1/__init__.py (Normal file)
BIN qt_app_pyside1/__pycache__/splash.cpython-311.pyc (Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/Analysis-00.toc (LFS, Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/EXE-00.toc (LFS, Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/FixedDebug.pkg (LFS, Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/PKG-00.toc (LFS, Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/PYZ-00.pyz (LFS, Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/PYZ-00.toc (LFS, Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/base_library.zip (Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/localpycs/pyimod01_archive.pyc (Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/localpycs/pyimod02_importers.pyc (Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/localpycs/pyimod03_ctypes.pyc (Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/localpycs/pyimod04_pywin32.pyc (Normal file)
Binary file not shown.
BIN qt_app_pyside1/build/FixedDebug/localpycs/struct.pyc (Normal file)
Binary file not shown.
906 qt_app_pyside1/build/FixedDebug/warn-FixedDebug.txt (Normal file)
@@ -0,0 +1,906 @@

This file lists modules PyInstaller was not able to find. This does not
necessarily mean this module is required for running your program. Python and
Python 3rd-party packages include a lot of conditional or optional modules. For
example the module 'ntpath' only exists on Windows, whereas the module
'posixpath' only exists on Posix systems.

Types if import:
* top-level: imported at the top-level - look at these first
* conditional: imported within an if-statement
* delayed: imported within a function
* optional: imported within a try-except-statement

IMPORTANT: Do NOT post this list to the issue-tracker. Use it as a basis for
tracking down the missing module yourself. Thanks!

missing module named usercustomize - imported by site (delayed, optional)
|
||||
missing module named sitecustomize - imported by site (delayed, optional)
|
||||
missing module named org - imported by copy (optional)
|
||||
missing module named 'org.python' - imported by pickle (optional), xml.sax (delayed, conditional), setuptools.sandbox (conditional)
|
||||
missing module named pwd - imported by posixpath (delayed, conditional, optional), shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional), http.server (delayed, optional), webbrowser (delayed), psutil (optional), netrc (delayed, conditional), getpass (delayed), distutils.util (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), distutils.archive_util (optional), setuptools._distutils.util (delayed, conditional, optional), setuptools._distutils.archive_util (optional)
|
||||
missing module named grp - imported by shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), distutils.archive_util (optional), setuptools._distutils.archive_util (optional)
|
||||
missing module named posix - imported by os (conditional, optional), posixpath (optional), shutil (conditional), importlib._bootstrap_external (conditional)
|
||||
missing module named resource - imported by posix (top-level), fsspec.asyn (conditional, optional), torch._inductor.codecache (delayed, conditional)
|
||||
missing module named _frozen_importlib_external - imported by importlib._bootstrap (delayed), importlib (optional), importlib.abc (optional), zipimport (top-level)
|
||||
excluded module named _frozen_importlib - imported by importlib (optional), importlib.abc (optional), zipimport (top-level)
|
||||
missing module named _posixsubprocess - imported by subprocess (conditional), multiprocessing.util (delayed), joblib.externals.loky.backend.fork_exec (delayed)
|
||||
missing module named fcntl - imported by subprocess (optional), xmlrpc.server (optional), tqdm.utils (delayed, optional), absl.flags._helpers (optional), filelock._unix (conditional, optional), pty (delayed, optional), torch.testing._internal.distributed.distributed_test (conditional)
|
||||
missing module named win32evtlog - imported by logging.handlers (delayed, optional)
|
||||
missing module named win32evtlogutil - imported by logging.handlers (delayed, optional)
|
||||
missing module named startup - imported by pyreadline3.keysyms.common (conditional), pyreadline3.keysyms.keysyms (conditional)
|
||||
missing module named sets - imported by pyreadline3.keysyms.common (optional), pytz.tzinfo (optional)
|
||||
missing module named System - imported by pyreadline3.clipboard.ironpython_clipboard (top-level), pyreadline3.keysyms.ironpython_keysyms (top-level), pyreadline3.console.ironpython_console (top-level), pyreadline3.rlmain (conditional)
|
||||
missing module named console - imported by pyreadline3.console.ansi (conditional)
|
||||
missing module named clr - imported by pyreadline3.clipboard.ironpython_clipboard (top-level), pyreadline3.console.ironpython_console (top-level)
|
||||
missing module named IronPythonConsole - imported by pyreadline3.console.ironpython_console (top-level)
|
||||
missing module named vms_lib - imported by platform (delayed, optional)
|
||||
missing module named 'java.lang' - imported by platform (delayed, optional), xml.sax._exceptions (conditional)
|
||||
missing module named java - imported by platform (delayed)
|
||||
missing module named _winreg - imported by platform (delayed, optional), pygments.formatters.img (optional)
|
||||
missing module named termios - imported by tty (top-level), getpass (optional), tqdm.utils (delayed, optional), absl.flags._helpers (optional), click._termui_impl (conditional)
|
||||
missing module named pyimod02_importers - imported by C:\Users\jatin\.conda\envs\traffic_monitor\Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgutil.py (delayed), C:\Users\jatin\.conda\envs\traffic_monitor\Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgres.py (delayed)
|
||||
missing module named _manylinux - imported by packaging._manylinux (delayed, optional), setuptools._vendor.packaging._manylinux (delayed, optional), wheel.vendored.packaging._manylinux (delayed, optional)
|
||||
missing module named '_typeshed.importlib' - imported by pkg_resources (conditional)
|
||||
missing module named _typeshed - imported by pkg_resources (conditional), setuptools.glob (conditional), setuptools.compat.py311 (conditional), torch.utils._backport_slots (conditional), streamlit.runtime.state.query_params (conditional), git.objects.fun (conditional), streamlit.runtime.state.query_params_proxy (conditional), setuptools._distutils.dist (conditional)
|
||||
missing module named jnius - imported by setuptools._vendor.platformdirs.android (delayed, conditional, optional)
|
||||
missing module named android - imported by setuptools._vendor.platformdirs.android (delayed, conditional, optional)
|
||||
missing module named _posixshmem - imported by multiprocessing.resource_tracker (conditional), multiprocessing.shared_memory (conditional)
|
||||
missing module named multiprocessing.set_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
|
||||
missing module named multiprocessing.get_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
|
||||
missing module named multiprocessing.get_context - imported by multiprocessing (top-level), multiprocessing.pool (top-level), multiprocessing.managers (top-level), multiprocessing.sharedctypes (top-level), joblib.externals.loky.backend.context (top-level)
|
||||
missing module named multiprocessing.TimeoutError - imported by multiprocessing (top-level), multiprocessing.pool (top-level), joblib.parallel (top-level)
|
||||
missing module named _scproxy - imported by urllib.request (conditional)
|
||||
missing module named multiprocessing.BufferTooShort - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
|
||||
missing module named multiprocessing.AuthenticationError - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
|
||||
missing module named multiprocessing.cpu_count - imported by multiprocessing (delayed, conditional, optional), skimage.util.apply_parallel (delayed, conditional, optional)
|
||||
missing module named multiprocessing.Pool - imported by multiprocessing (top-level), torchvision.datasets.kinetics (top-level), scipy._lib._util (delayed, conditional)
|
||||
missing module named multiprocessing.RLock - imported by multiprocessing (delayed, conditional, optional), tqdm.std (delayed, conditional, optional)
|
||||
missing module named asyncio.DefaultEventLoopPolicy - imported by asyncio (delayed, conditional), asyncio.events (delayed, conditional)
|
||||
missing module named 'distutils._modified' - imported by setuptools._distutils.file_util (delayed)
|
||||
missing module named 'distutils._log' - imported by setuptools._distutils.command.bdist_dumb (top-level), setuptools._distutils.command.bdist_rpm (top-level), setuptools._distutils.command.build_clib (top-level), setuptools._distutils.command.build_ext (top-level), setuptools._distutils.command.build_py (top-level), setuptools._distutils.command.build_scripts (top-level), setuptools._distutils.command.clean (top-level), setuptools._distutils.command.config (top-level), setuptools._distutils.command.install (top-level), setuptools._distutils.command.install_scripts (top-level), setuptools._distutils.command.sdist (top-level)
|
||||
missing module named trove_classifiers - imported by setuptools.config._validate_pyproject.formats (optional)
|
||||
missing module named importlib_resources - imported by setuptools._vendor.jaraco.text (optional), tqdm.cli (delayed, conditional, optional), jsonschema_specifications._core (optional)
|
||||
missing module named numpy.arccosh - imported by numpy (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.arcsinh - imported by numpy (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.arctan - imported by numpy (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.tan - imported by numpy (top-level), scipy.signal._spline_filters (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.complex128 - imported by numpy (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.complex64 - imported by numpy (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.greater - imported by numpy (top-level), scipy.optimize._minpack_py (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.power - imported by numpy (top-level), scipy.stats._kde (top-level)
|
||||
missing module named numpy.sinh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.cosh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.tanh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
|
||||
missing module named numpy.expm1 - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
|
||||
missing module named numpy.log1p - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
|
||||
missing module named numpy.ceil - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.log - imported by numpy (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._discrete_distns (top-level), scipy.stats._morestats (top-level), scipy.signal._waveforms (top-level)
|
||||
missing module named numpy.logical_and - imported by numpy (top-level), scipy.stats._distn_infrastructure (top-level)
|
||||
missing module named numpy.sign - imported by numpy (top-level), scipy.linalg._matfuncs (top-level)
|
||||
missing module named numpy.conjugate - imported by numpy (top-level), scipy.linalg._matfuncs (top-level), scipy.signal._filter_design (top-level)
|
||||
missing module named numpy.logical_not - imported by numpy (top-level), scipy.linalg._matfuncs (top-level)
|
||||
missing module named numpy.single - imported by numpy (top-level), scipy.linalg._decomp_schur (top-level)
|
||||
missing module named numpy.floor - imported by numpy (top-level), scipy.special._basic (top-level), scipy.special._orthogonal (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.arcsin - imported by numpy (top-level), scipy.linalg._decomp_svd (top-level)
|
||||
missing module named numpy.arccos - imported by numpy (top-level), scipy.linalg._decomp_svd (top-level), scipy.special._orthogonal (top-level)
|
||||
missing module named numpy.conj - imported by numpy (top-level), scipy.linalg._decomp (top-level), scipy.io._mmio (top-level)
|
||||
missing module named numpy.inexact - imported by numpy (top-level), scipy.linalg._decomp (top-level), scipy.special._basic (top-level), scipy.optimize._minpack_py (top-level)
|
||||
missing module named _dummy_thread - imported by numpy.core.arrayprint (optional), cffi.lock (conditional, optional), torch._jit_internal (optional)
|
||||
missing module named numpy.core.result_type - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.float_ - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.number - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.object_ - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.max - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.all - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.errstate - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.bool_ - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.inf - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.isnan - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.array2string - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.lib.imag - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.lib.real - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.lib.iscomplexobj - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.signbit - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
|
||||
missing module named numpy.core.isscalar - imported by numpy.core (delayed), numpy.testing._private.utils (delayed), numpy.lib.polynomial (top-level)
|
||||
missing module named win32pdh - imported by numpy.testing._private.utils (delayed, conditional)
|
||||
missing module named numpy.core.array - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.isnat - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.ndarray - imported by numpy.core (top-level), numpy.testing._private.utils (top-level), numpy.lib.utils (top-level)
|
||||
missing module named numpy.core.array_repr - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.arange - imported by numpy.core (top-level), numpy.testing._private.utils (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.empty - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.float32 - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.intp - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.core.linspace - imported by numpy.core (top-level), numpy.lib.index_tricks (top-level)
|
||||
missing module named numpy.core.iinfo - imported by numpy.core (top-level), numpy.lib.twodim_base (top-level)
|
||||
missing module named numpy.core.transpose - imported by numpy.core (top-level), numpy.lib.function_base (top-level)
|
||||
missing module named numpy._typing._ufunc - imported by numpy._typing (conditional)
|
||||
missing module named numpy.uint - imported by numpy (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level)
|
||||
missing module named numpy.core.asarray - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.utils (top-level), numpy.fft._pocketfft (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.integer - imported by numpy.core (top-level), numpy.fft.helper (top-level)
|
||||
missing module named numpy.core.sqrt - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.conjugate - imported by numpy.core (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.swapaxes - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.zeros - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
|
||||
missing module named numpy.core.reciprocal - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.sort - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.argsort - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.sign - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.count_nonzero - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.divide - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.matmul - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.asanyarray - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.atleast_2d - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.prod - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.amax - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.amin - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.moveaxis - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.geterrobj - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.finfo - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.isfinite - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.sum - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.multiply - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.add - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.dot - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.Inf - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.newaxis - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.complexfloating - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.inexact - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.cdouble - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.csingle - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.double - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.single - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.intc - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named numpy.core.empty_like - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
|
||||
missing module named pyodide_js - imported by threadpoolctl (delayed, optional)
|
||||
missing module named numpy.core.ufunc - imported by numpy.core (top-level), numpy.lib.utils (top-level)
|
||||
missing module named numpy.core.ones - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.hstack - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.atleast_1d - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
|
||||
missing module named numpy.core.atleast_3d - imported by numpy.core (top-level), numpy.lib.shape_base (top-level)
|
||||
missing module named numpy.core.vstack - imported by numpy.core (top-level), numpy.lib.shape_base (top-level)
|
||||
missing module named pickle5 - imported by numpy.compat.py3k (optional)
|
||||
missing module named numpy.eye - imported by numpy (delayed), numpy.core.numeric (delayed), scipy.optimize._optimize (top-level), scipy.linalg._decomp (top-level), scipy.interpolate._pade (top-level), scipy.signal._lti_conversion (top-level)
|
||||
missing module named numpy.recarray - imported by numpy (top-level), numpy.lib.recfunctions (top-level), numpy.ma.mrecords (top-level)
|
||||
missing module named numpy.expand_dims - imported by numpy (top-level), numpy.ma.core (top-level)
|
||||
missing module named numpy.array - imported by numpy (top-level), numpy.ma.core (top-level), numpy.ma.extras (top-level), numpy.ma.mrecords (top-level), scipy.linalg._decomp (top-level), scipy.linalg._decomp_schur (top-level), scipy.sparse.linalg._isolve.utils (top-level), scipy.stats._stats_py (top-level), scipy.interpolate._interpolate (top-level), scipy.interpolate._fitpack_impl (top-level), scipy.interpolate._fitpack2 (top-level), scipy.integrate._ode (top-level), scipy._lib._finite_differences (top-level), scipy.stats._morestats (top-level), scipy.optimize._lbfgsb_py (top-level), scipy.optimize._tnc (top-level), scipy.optimize._slsqp_py (top-level), dill._objects (optional), scipy.io._netcdf (top-level), scipy.signal._spline_filters (top-level), scipy.signal._filter_design (top-level), scipy.signal._lti_conversion (top-level)
|
||||
missing module named numpy.iscomplexobj - imported by numpy (top-level), numpy.ma.core (top-level), scipy.linalg._decomp (top-level), scipy.linalg._decomp_ldl (top-level)
|
||||
missing module named numpy.amin - imported by numpy (top-level), numpy.ma.core (top-level), scipy.stats._morestats (top-level)
|
||||
missing module named numpy.amax - imported by numpy (top-level), numpy.ma.core (top-level), scipy.linalg._matfuncs (top-level), scipy.stats._morestats (top-level)
|
||||
missing module named numpy.isinf - imported by numpy (top-level), numpy.testing._private.utils (top-level), scipy.stats._distn_infrastructure (top-level)
|
||||
missing module named numpy.isnan - imported by numpy (top-level), numpy.testing._private.utils (top-level)
|
||||
missing module named numpy.isfinite - imported by numpy (top-level), numpy.testing._private.utils (top-level), scipy.linalg._decomp (top-level), scipy.linalg._matfuncs (top-level), scipy.optimize._slsqp_py (top-level)
|
||||
missing module named numpy.float64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy.stats._mstats_extras (top-level), scipy.optimize._lbfgsb_py (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.float32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), scipy.signal._spline_filters (top-level)
|
||||
missing module named numpy.uint64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.uint32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.uint16 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.uint8 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.int64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.int32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), dill._objects (optional), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.int16 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.int8 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.bytes_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.str_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.void - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.object_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.datetime64 - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.timedelta64 - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.number - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.complexfloating - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.floating - imported by numpy (top-level), numpy._typing._array_like (top-level), torch._dynamo.variables.misc (optional)
|
||||
missing module named numpy.integer - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ctypeslib (top-level)
|
||||
missing module named numpy.unsignedinteger - imported by numpy (top-level), numpy._typing._array_like (top-level)
|
||||
missing module named numpy.bool_ - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ma.core (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named numpy.generic - imported by numpy (top-level), numpy._typing._array_like (top-level), torch._dynamo.variables.misc (optional)
|
||||
missing module named numpy.dtype - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.array_api._typing (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), numpy.ctypeslib (top-level), scipy.optimize._minpack_py (top-level), dill._dill (delayed), scipy.io._netcdf (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), torch._dynamo.variables.misc (optional), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level)
|
||||
missing module named numpy.ndarray - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ma.core (top-level), numpy.ma.extras (top-level), numpy.lib.recfunctions (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), numpy.ctypeslib (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._mstats_basic (top-level), scipy.stats._mstats_extras (top-level), pandas.compat.numpy.function (top-level), dill._dill (delayed), scipy.io._mmio (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), imageio.typing (optional)
|
||||
missing module named numpy.ufunc - imported by numpy (top-level), numpy._typing (top-level), numpy.testing.overrides (top-level), dill._dill (delayed), dill._objects (optional), skimage._vendored.numpy_lookfor (top-level)
|
||||
missing module named numpy.histogramdd - imported by numpy (delayed), numpy.lib.twodim_base (delayed)
|
||||
missing module named numpy._distributor_init_local - imported by numpy (optional), numpy._distributor_init (optional)
|
||||
missing module named openvino_tokenizers - imported by openvino.tools.ovc.utils (delayed, optional)
|
||||
missing module named StringIO - imported by six (conditional)
|
||||
missing module named six.moves.zip - imported by six.moves (top-level), pasta.base.annotate (top-level)
|
||||
runtime module named six.moves - imported by dateutil.tz.tz (top-level), dateutil.tz._factories (top-level), dateutil.tz.win (top-level), dateutil.rrule (top-level), astunparse (top-level), tensorflow.python.distribute.coordinator.cluster_coordinator (top-level), six.moves.urllib (top-level), tensorflow.python.distribute.multi_process_runner (top-level), pasta.base.annotate (top-level)
|
||||
missing module named six.moves.cStringIO - imported by six.moves (top-level), astunparse (top-level)
|
||||
missing module named six.moves.range - imported by six.moves (top-level), dateutil.rrule (top-level)
|
||||
missing module named rules_python - imported by tensorflow.python.platform.resource_loader (optional)
|
||||
missing module named google.protobuf.pyext._message - imported by google.protobuf.pyext (conditional, optional), google.protobuf.internal.api_implementation (conditional, optional), google.protobuf.descriptor (conditional), google.protobuf.pyext.cpp_message (conditional)
|
||||
missing module named google.protobuf.enable_deterministic_proto_serialization - imported by google.protobuf (optional), google.protobuf.internal.api_implementation (optional)
|
||||
missing module named google.protobuf.internal._api_implementation - imported by google.protobuf.internal (optional), google.protobuf.internal.api_implementation (optional)
|
||||
missing module named astn - imported by gast.ast2 (top-level)
|
||||
missing module named theano - imported by opt_einsum.backends.theano (delayed)
|
||||
missing module named jax - imported by scipy._lib.array_api_compat.common._helpers (delayed), optree.integrations.jax (top-level), opt_einsum.backends.jax (delayed, conditional), keras.src.trainers.data_adapters.data_adapter_utils (delayed), keras.src.backend.jax.core (top-level), keras.src.backend.jax.distribution_lib (top-level), keras.src.backend.jax.image (top-level), keras.src.backend.jax.linalg (top-level), keras.src.backend.jax.math (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.random (top-level), keras.src.backend.jax.rnn (top-level), keras.src.backend.jax.trainer (top-level), keras.src.backend.numpy.nn (top-level), keras.src.backend.jax.export (delayed), tensorflow.lite.python.util (optional), sklearn.externals.array_api_compat.common._helpers (delayed), sklearn.externals.array_api_extra._lib._lazy (delayed, conditional), openvino.frontend.jax.utils (top-level), openvino.frontend.jax.jaxpr_decoder (top-level), openvino.tools.ovc.convert_impl (delayed, conditional), keras.src.backend.jax.optimizer (top-level), keras.src.ops.nn (delayed, conditional), scipy._lib._array_api (delayed, conditional)
|
||||
missing module named cupy - imported by scipy._lib.array_api_compat.common._helpers (delayed, conditional), opt_einsum.backends.cupy (delayed), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), sklearn.externals.array_api_compat.cupy (top-level), sklearn.externals.array_api_compat.cupy._aliases (top-level), sklearn.externals.array_api_compat.cupy._info (top-level), sklearn.externals.array_api_compat.cupy._typing (top-level), sklearn.utils._testing (delayed, conditional), scipy._lib.array_api_compat.cupy (top-level), scipy._lib.array_api_compat.cupy._aliases (top-level), scipy._lib.array_api_compat.cupy._info (top-level), scipy._lib.array_api_compat.cupy._typing (top-level), scipy._lib._array_api (delayed, conditional), narwhals._pandas_like.series (delayed, conditional), sklearn.externals.array_api_compat.cupy.fft (top-level), sklearn.externals.array_api_compat.cupy.linalg (top-level)
|
||||
missing module named simplejson - imported by requests.compat (conditional, optional), huggingface_hub.utils._fixes (optional)
|
||||
missing module named dummy_threading - imported by requests.cookies (optional), joblib.compressor (optional)
|
||||
missing module named 'h2.events' - imported by urllib3.http2.connection (top-level)
|
||||
missing module named 'h2.connection' - imported by urllib3.http2.connection (top-level)
|
||||
missing module named h2 - imported by urllib3.http2.connection (top-level)
|
||||
missing module named zstandard - imported by urllib3.util.request (optional), urllib3.response (optional), fsspec.compression (optional)
|
||||
missing module named brotlicffi - imported by urllib3.util.request (optional), urllib3.response (optional), aiohttp.compression_utils (optional)
|
||||
missing module named collections.Callable - imported by collections (optional), cffi.api (optional), socks (optional)
|
||||
missing module named bcrypt - imported by cryptography.hazmat.primitives.serialization.ssh (optional)
|
||||
missing module named cryptography.x509.UnsupportedExtension - imported by cryptography.x509 (optional), urllib3.contrib.pyopenssl (optional)
|
||||
missing module named chardet - imported by requests (optional), pygments.lexer (delayed, conditional, optional)
|
||||
missing module named 'pyodide.ffi' - imported by urllib3.contrib.emscripten.fetch (delayed, optional)
|
||||
missing module named pyodide - imported by urllib3.contrib.emscripten.fetch (top-level)
|
||||
missing module named js - imported by urllib3.contrib.emscripten.fetch (top-level), fsspec.implementations.http_sync (delayed, optional)
|
||||
missing module named oauth2client - imported by tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver (optional), tensorflow.python.tpu.client.client (optional)
|
||||
missing module named googleapiclient - imported by tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver (optional), tensorflow.python.tpu.client.client (optional)
|
||||
missing module named cloud_tpu_client - imported by tensorflow.python.distribute.cluster_resolver.tpu.tpu_cluster_resolver (optional)
|
||||
missing module named kubernetes - imported by tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver (delayed, conditional, optional)
|
||||
missing module named distributed - imported by fsspec.transaction (delayed), joblib._dask (optional), joblib._parallel_backends (delayed, optional)
|
||||
missing module named 'sphinx.ext' - imported by pyarrow.vendored.docscrape (delayed, conditional)
|
||||
missing module named dateutil.tz.tzfile - imported by dateutil.tz (top-level), dateutil.zoneinfo (top-level)
|
||||
missing module named pytest - imported by scipy._lib._testutils (delayed), sympy.testing.runtests_pytest (optional), torch.testing._internal.common_utils (delayed, conditional, optional), h5py.tests (delayed, optional), networkx.classes.backends (conditional, optional), sklearn.utils._testing (optional), torch.testing._internal.optests.generate_tests (delayed, conditional), pandas._testing._io (delayed), pandas._testing (delayed), skimage._shared.tester (delayed), fsspec.conftest (top-level), pyarrow.conftest (top-level), pyarrow.tests.util (top-level), torch._numpy.testing.utils (delayed), skimage.filters.rank.tests.test_rank (top-level), skimage.data._fetchers (delayed, conditional), skimage._shared.testing (top-level)
|
||||
missing module named 'cupy_backends.cuda' - imported by scipy._lib.array_api_compat.common._helpers (delayed)
|
||||
missing module named 'cupy.cuda' - imported by sklearn.externals.array_api_compat.cupy._typing (top-level), sklearn.externals.array_api_compat.common._helpers (delayed), scipy._lib.array_api_compat.cupy._typing (top-level), scipy._lib.array_api_compat.common._helpers (delayed)
|
||||
missing module named 'jax.experimental' - imported by keras.src.trainers.data_adapters.data_adapter_utils (delayed), keras.src.testing.test_case (delayed, conditional), keras.src.backend.jax.core (top-level), keras.src.backend.jax.distribution_lib (top-level), keras.src.backend.jax.numpy (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.sparse (top-level), keras.src.backend.jax.export (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), scipy._lib.array_api_compat.common._helpers (delayed, conditional)
|
||||
missing module named 'jax.numpy' - imported by optree.integrations.jax (top-level), keras.src.backend.jax.core (top-level), keras.src.backend.jax.image (top-level), keras.src.backend.jax.linalg (top-level), keras.src.backend.jax.math (top-level), keras.src.backend.jax.numpy (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.sparse (top-level), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), openvino.frontend.jax.utils (top-level), scipy._lib.array_api_compat.common._helpers (delayed, conditional)
|
||||
missing module named sparse - imported by scipy.sparse.linalg._expm_multiply (delayed, conditional), scipy.sparse.linalg._matfuncs (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), scipy._lib.array_api_compat.common._helpers (delayed, conditional)
|
||||
missing module named 'dask.array' - imported by sklearn.externals.array_api_compat.common._helpers (delayed, conditional), sklearn.externals.array_api_compat.dask.array (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.common._helpers (delayed, conditional), scipy._lib.array_api_compat.dask.array (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), narwhals._dask.expr (delayed), skimage.util.apply_parallel (delayed, optional), sklearn.externals.array_api_compat.dask.array.fft (top-level), sklearn.externals.array_api_compat.dask.array.linalg (top-level)
|
||||
missing module named ndonnx - imported by sklearn.externals.array_api_compat.common._helpers (delayed), scipy._lib.array_api_compat.common._helpers (delayed)
|
||||
missing module named 'numpy.lib.array_utils' - imported by joblib._memmapping_reducer (delayed, optional), sklearn.externals.array_api_compat.common._linalg (conditional), scipy._lib.array_api_compat.common._linalg (conditional)
|
||||
missing module named 'numpy.linalg._linalg' - imported by sklearn.externals.array_api_compat.numpy.linalg (delayed, optional), scipy._lib.array_api_compat.numpy.linalg (delayed, optional)
|
||||
missing module named Cython - imported by scipy._lib._testutils (optional)
|
||||
missing module named cython - imported by scipy._lib._testutils (optional), pyarrow.conftest (optional)
|
||||
missing module named sphinx - imported by scipy._lib._docscrape (delayed, conditional)
|
||||
missing module named cupyx - imported by scipy._lib._array_api (delayed, conditional)
|
||||
missing module named scipy.sparse.issparse - imported by scipy.sparse (top-level), scipy.sparse.linalg._interface (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.sparse.linalg._norm (top-level), scipy.integrate._ivp.bdf (top-level), scipy.optimize._numdiff (top-level), scipy.integrate._ivp.radau (top-level), scipy.sparse.csgraph._laplacian (top-level), scipy.optimize._constraints (top-level), scipy.optimize._trustregion_constr.projections (top-level), scipy.optimize._lsq.least_squares (top-level), scipy.optimize._lsq.common (top-level), scipy.optimize._lsq.lsq_linear (top-level), scipy.optimize._linprog_highs (top-level), scipy.optimize._differentialevolution (top-level), scipy.optimize._milp (top-level), scipy.io.matlab._mio (delayed, conditional), scipy.io._fast_matrix_market (top-level), scipy.io._mmio (top-level), tensorflow.python.keras.engine.data_adapter (delayed, optional), tensorflow.python.keras.engine.training_arrays_v1 (optional), tensorflow.python.keras.engine.training_v1 (optional), sklearn.utils._param_validation (top-level), sklearn.externals._scipy.sparse.csgraph._laplacian (top-level), sklearn.utils._set_output (top-level), sklearn.utils.multiclass (top-level), sklearn.metrics.cluster._unsupervised (top-level), sklearn.metrics.pairwise (top-level), sklearn.metrics._pairwise_distances_reduction._dispatcher (top-level), sklearn.cluster._feature_agglomeration (top-level), sklearn.cluster._bicluster (top-level), sklearn.neighbors._base (top-level), sklearn.decomposition._pca (top-level), sklearn.cluster._hdbscan.hdbscan (top-level), sklearn.cluster._optics (top-level), sklearn.manifold._isomap (top-level), sklearn.manifold._t_sne (top-level), sklearn.metrics._classification (top-level), sklearn.metrics._ranking (top-level), sklearn.utils._indexing (top-level), scipy._lib._array_api (delayed), pandas.core.dtypes.common (delayed, conditional, optional), sklearn.tree._classes (top-level), scipy.sparse.csgraph._validation (top-level)
|
||||
missing module named scipy.linalg._fblas_64 - imported by scipy.linalg (optional), scipy.linalg.blas (optional)
|
||||
missing module named scipy.linalg._cblas - imported by scipy.linalg (optional), scipy.linalg.blas (optional)
|
||||
missing module named scipy.linalg._flapack_64 - imported by scipy.linalg (optional), scipy.linalg.lapack (optional)
|
||||
missing module named scipy.linalg._clapack - imported by scipy.linalg (optional), scipy.linalg.lapack (optional)
|
||||
missing module named scipy.special.elliprg - imported by scipy.special (top-level), skimage.draw.draw3d (top-level)
|
||||
missing module named scipy.special.inv_boxcox - imported by scipy.special (top-level), sklearn.preprocessing._data (top-level)
|
||||
missing module named scipy.special.boxcox - imported by scipy.special (top-level), sklearn.preprocessing._data (top-level)
|
||||
missing module named scipy.special.sph_jn - imported by scipy.special (delayed, conditional, optional), sympy.functions.special.bessel (delayed, conditional, optional)
|
||||
missing module named scipy.special.gammaincinv - imported by scipy.special (top-level), scipy.stats._qmvnt (top-level)
|
||||
missing module named scipy.special.ive - imported by scipy.special (top-level), scipy.stats._multivariate (top-level)
|
||||
missing module named scipy.special.betaln - imported by scipy.special (top-level), scipy.stats._discrete_distns (top-level), scipy.stats._multivariate (top-level), sklearn.mixture._bayesian_mixture (top-level)
|
||||
missing module named scipy.special.beta - imported by scipy.special (top-level), scipy.stats._tukeylambda_stats (top-level)
|
||||
missing module named scipy.special.loggamma - imported by scipy.special (top-level), scipy.fft._fftlog_backend (top-level), scipy.stats._multivariate (top-level)
|
||||
missing module named scipy.interpolate.PPoly - imported by scipy.interpolate (top-level), scipy.interpolate._cubic (top-level), scipy.spatial.transform._rotation_spline (delayed), scipy.integrate._bvp (delayed)
|
||||
missing module named _curses - imported by curses (top-level), curses.has_key (top-level)
|
||||
missing module named olefile - imported by PIL.FpxImagePlugin (top-level), PIL.MicImagePlugin (top-level)
|
||||
missing module named xmlrpclib - imported by defusedxml.xmlrpc (conditional)
|
||||
missing module named railroad - imported by pyparsing.diagram (top-level)
|
||||
missing module named pyparsing.Word - imported by pyparsing (delayed), pyparsing.unicode (delayed), pydot.dot_parser (top-level)
|
||||
missing module named gi - imported by matplotlib.cbook (delayed, conditional)
|
||||
missing module named 'scikits.umfpack' - imported by scipy.optimize._linprog_ip (optional)
|
||||
missing module named 'sksparse.cholmod' - imported by scipy.optimize._linprog_ip (optional)
|
||||
missing module named sksparse - imported by scipy.optimize._trustregion_constr.projections (optional), scipy.optimize._linprog_ip (optional)
|
||||
missing module named scipy.optimize.root_scalar - imported by scipy.optimize (top-level), scipy.stats._continuous_distns (top-level), scipy.stats._stats_py (top-level), scipy.stats._multivariate (top-level)
|
||||
missing module named scipy.optimize.brentq - imported by scipy.optimize (delayed), scipy.integrate._ivp.ivp (delayed), scipy.stats._binomtest (top-level), scipy.stats._odds_ratio (top-level)
|
||||
missing module named scipy.optimize.OptimizeResult - imported by scipy.optimize (top-level), scipy.integrate._bvp (top-level), scipy.integrate._ivp.ivp (top-level), scipy._lib.cobyqa.main (top-level), scipy._lib.cobyqa.problem (top-level), scipy.optimize._lsq.least_squares (top-level), scipy.optimize._lsq.trf (top-level), scipy.optimize._lsq.dogbox (top-level), scipy.optimize._lsq.lsq_linear (top-level), scipy.optimize._lsq.trf_linear (top-level), scipy.optimize._lsq.bvls (top-level), scipy.optimize._spectral (top-level), scipy.optimize._differentialevolution (top-level), scipy.optimize._shgo (top-level), scipy.optimize._dual_annealing (top-level), scipy.optimize._qap (top-level), scipy.optimize._direct_py (top-level)
|
||||
missing module named scipy.optimize.minimize_scalar - imported by scipy.optimize (top-level), scipy.interpolate._bsplines (top-level), scipy.stats._multicomp (top-level)
|
||||
missing module named scipy.special.airy - imported by scipy.special (top-level), scipy.special._orthogonal (top-level)
|
||||
missing module named scipy.linalg.orthogonal_procrustes - imported by scipy.linalg (top-level), scipy.spatial._procrustes (top-level)
|
||||
missing module named scipy.linalg.qr_insert - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level)
|
||||
missing module named uarray - imported by scipy._lib.uarray (conditional, optional)
|
||||
missing module named scipy.sparse.linalg.matrix_power - imported by scipy.sparse.linalg (delayed), scipy.sparse._matrix (delayed)
|
||||
missing module named scikits - imported by scipy.sparse.linalg._dsolve.linsolve (optional)
|
||||
missing module named scipy.sparse.lil_matrix - imported by scipy.sparse (top-level), sklearn.manifold._locally_linear (top-level)
|
||||
missing module named scipy.sparse.dia_matrix - imported by scipy.sparse (top-level), sklearn.cluster._bicluster (top-level)
|
||||
missing module named scipy.sparse.sparray - imported by scipy.sparse (optional), sklearn.utils.fixes (optional)
|
||||
missing module named scipy.sparse.coo_array - imported by scipy.sparse (top-level), scipy.io._fast_matrix_market (top-level), scipy.io._mmio (top-level)
|
||||
missing module named scipy.sparse.vstack - imported by scipy.sparse (top-level), scipy.optimize._linprog_highs (top-level), scipy.optimize._milp (top-level)
|
||||
missing module named scipy.sparse.bmat - imported by scipy.sparse (top-level), scipy.optimize._trustregion_constr.projections (top-level), scipy.optimize._trustregion_constr.qp_subproblem (top-level)
|
||||
missing module named scipy.sparse.find - imported by scipy.sparse (top-level), scipy.optimize._numdiff (top-level), scipy.integrate._ivp.common (top-level)
|
||||
missing module named scipy.sparse.csr_matrix - imported by scipy.sparse (top-level), scipy.optimize._numdiff (top-level), scipy.optimize._lsq.lsq_linear (top-level), sklearn.utils._param_validation (top-level), sklearn.metrics.pairwise (top-level), sklearn.neighbors._base (top-level), sklearn.manifold._locally_linear (top-level), sklearn.manifold._t_sne (top-level), sklearn.metrics._classification (top-level), sklearn.metrics._ranking (top-level)
|
||||
missing module named scipy.sparse.csc_matrix - imported by scipy.sparse (top-level), scipy.integrate._bvp (top-level), scipy.integrate._ivp.bdf (top-level), scipy.optimize._numdiff (top-level), scipy.integrate._ivp.radau (top-level), scipy.linalg._sketches (top-level), scipy.optimize._trustregion_constr.projections (top-level), scipy.optimize._trustregion_constr.qp_subproblem (top-level), scipy.optimize._linprog_highs (top-level), scipy.io._harwell_boeing.hb (top-level), sklearn.cluster._spectral (top-level)
|
||||
missing module named scipy.sparse.coo_matrix - imported by scipy.sparse (top-level), scipy.integrate._bvp (top-level), scipy.optimize._numdiff (top-level), scipy.integrate._ivp.common (top-level), scipy.stats._crosstab (top-level), pandas.core.arrays.sparse.accessor (delayed), scipy.io.matlab._mio (delayed, conditional), scipy.io._fast_matrix_market (top-level), scipy.io._mmio (top-level), sklearn.metrics._classification (top-level)
|
||||
missing module named scipy.sparse.diags - imported by scipy.sparse (delayed), scipy.sparse.linalg._special_sparse_arrays (delayed)
|
||||
missing module named scipy.sparse.spdiags - imported by scipy.sparse (delayed), scipy.sparse.linalg._special_sparse_arrays (delayed)
|
||||
missing module named scipy.sparse.dia_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level)
|
||||
missing module named scipy.sparse.kron - imported by scipy.sparse (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level)
|
||||
missing module named scipy.sparse.eye - imported by scipy.sparse (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level), scipy.integrate._ivp.bdf (top-level), scipy.integrate._ivp.radau (top-level), scipy.optimize._trustregion_constr.equality_constrained_sqp (top-level), scipy.optimize._trustregion_constr.projections (top-level), sklearn.manifold._locally_linear (top-level)
|
||||
missing module named scipy.sparse.diags_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level)
|
||||
missing module named scipy.sparse.eye_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level)
|
||||
missing module named scipy.sparse.csc_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.optimize._milp (top-level), scipy.io._harwell_boeing.hb (top-level)
|
||||
missing module named scipy.sparse.csr_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.interpolate._bsplines (top-level), scipy.interpolate._ndbspline (top-level)
|
||||
missing module named scipy.sparse.SparseEfficiencyWarning - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), sklearn.cluster._optics (top-level)
|
||||
missing module named scipy.stats.iqr - imported by scipy.stats (delayed), scipy.stats._hypotests (delayed)
|
||||
missing module named dummy_thread - imported by cffi.lock (conditional, optional)
|
||||
missing module named thread - imported by cffi.lock (conditional, optional), cffi.cparser (conditional, optional)
|
||||
missing module named cStringIO - imported by cffi.ffiplatform (optional)
|
||||
missing module named cPickle - imported by pycparser.ply.yacc (delayed, optional)
|
||||
missing module named cffi._pycparser - imported by cffi (optional), cffi.cparser (optional)
|
||||
missing module named scipy._distributor_init_local - imported by scipy (optional), scipy._distributor_init (optional)
|
||||
missing module named numexpr - imported by pandas.core.computation.expressions (conditional), pandas.core.computation.engines (delayed)
|
||||
missing module named pandas.core.groupby.PanelGroupBy - imported by pandas.core.groupby (delayed, optional), tqdm.std (delayed, optional)
|
||||
missing module named numba - imported by pandas.core._numba.executor (delayed, conditional), pandas.core.util.numba_ (delayed, conditional), pandas.core.groupby.numba_ (delayed, conditional), pandas.core.window.numba_ (delayed, conditional), pandas.core.window.online (delayed, conditional), pandas.core._numba.kernels.mean_ (top-level), pandas.core._numba.kernels.shared (top-level), pandas.core._numba.kernels.sum_ (top-level), pandas.core._numba.kernels.min_max_ (top-level), pandas.core._numba.kernels.var_ (top-level), pandas.core._numba.extensions (top-level)
|
||||
missing module named 'numba.extending' - imported by pandas.core._numba.kernels.sum_ (top-level)
|
||||
missing module named pandas.core.window._Rolling_and_Expanding - imported by pandas.core.window (delayed, optional), tqdm.std (delayed, optional)
|
||||
missing module named 'numba.typed' - imported by pandas.core._numba.extensions (delayed)
|
||||
missing module named 'numba.core' - imported by pandas.core._numba.extensions (top-level)
|
||||
missing module named traitlets - imported by pandas.io.formats.printing (delayed, conditional), plotly.basewidget (top-level), pydeck.widget.widget (top-level), altair.jupyter.jupyter_chart (top-level)
|
||||
missing module named 'IPython.core' - imported by sympy.interactive.printing (delayed, optional), pandas.io.formats.printing (delayed, conditional), h5py (delayed, conditional, optional), h5py.ipy_completer (top-level), rich.pretty (delayed, optional), altair.utils.core (delayed, conditional), altair._magics (top-level)
|
||||
missing module named IPython - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional, optional), pandas.io.formats.printing (delayed), h5py (delayed, conditional, optional), h5py.ipy_completer (top-level), keras.src.utils.model_visualization (delayed, conditional, optional), keras.src.saving.file_editor (delayed, optional), tensorflow.python.keras.utils.vis_utils (delayed, conditional, optional)
|
||||
missing module named botocore - imported by pandas.io.common (delayed, conditional, optional)
|
||||
missing module named 'lxml.etree' - imported by openpyxl.xml (delayed, optional), openpyxl.xml.functions (conditional), pandas.io.xml (delayed), pandas.io.formats.xml (delayed), networkx.readwrite.graphml (delayed, optional), pandas.io.html (delayed), imageio.plugins._tifffile (delayed, optional)
|
||||
missing module named openpyxl.tests - imported by openpyxl.reader.excel (optional)
|
||||
missing module named 'odf.config' - imported by pandas.io.excel._odswriter (delayed)
|
||||
missing module named 'odf.style' - imported by pandas.io.excel._odswriter (delayed)
|
||||
missing module named 'odf.text' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
|
||||
missing module named 'odf.table' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
|
||||
missing module named 'odf.opendocument' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
|
||||
missing module named xlrd - imported by pandas.io.excel._xlrd (delayed, conditional), pandas.io.excel._base (delayed, conditional)
|
||||
missing module named pyxlsb - imported by pandas.io.excel._pyxlsb (delayed, conditional)
|
||||
missing module named 'odf.office' - imported by pandas.io.excel._odfreader (delayed)
|
||||
missing module named 'odf.element' - imported by pandas.io.excel._odfreader (delayed)
|
||||
missing module named 'odf.namespaces' - imported by pandas.io.excel._odfreader (delayed)
|
||||
missing module named odf - imported by pandas.io.excel._odfreader (conditional)
|
||||
missing module named python_calamine - imported by pandas.io.excel._calamine (delayed, conditional)
|
||||
missing module named collections.Mapping - imported by collections (optional), pytz.lazy (optional)
|
||||
missing module named UserDict - imported by pytz.lazy (optional)
|
||||
missing module named Foundation - imported by pandas.io.clipboard (delayed, conditional, optional)
|
||||
missing module named AppKit - imported by pandas.io.clipboard (delayed, conditional, optional)
|
||||
missing module named PyQt4 - imported by pandas.io.clipboard (delayed, conditional, optional)
|
||||
missing module named qtpy - imported by pandas.io.clipboard (delayed, conditional, optional)
|
||||
missing module named 'sqlalchemy.engine' - imported by pandas.io.sql (delayed), streamlit.connections.sql_connection (conditional)
|
||||
missing module named 'sqlalchemy.types' - imported by pandas.io.sql (delayed, conditional)
|
||||
missing module named 'sqlalchemy.schema' - imported by pandas.io.sql (delayed)
|
||||
missing module named 'sqlalchemy.sql' - imported by pandas.io.sql (conditional)
|
||||
missing module named sqlalchemy - imported by pandas.io.sql (delayed, conditional), streamlit.connections.sql_connection (delayed)
|
||||
missing module named pandas.core.internals.Block - imported by pandas.core.internals (conditional), pandas.io.pytables (conditional)
|
||||
missing module named tables - imported by pandas.io.pytables (delayed, conditional)
|
||||
missing module named lxml - imported by sympy.utilities.mathml (delayed), pandas.io.xml (conditional), tifffile.tifffile (delayed, optional)
|
||||
missing module named 'google.auth' - imported by pandas.io.gbq (conditional)
|
||||
missing module named 'lxml.html' - imported by pandas.io.html (delayed)
|
||||
missing module named bs4 - imported by pandas.io.html (delayed)
|
||||
missing module named pandas.Panel - imported by pandas (delayed, optional), tqdm.std (delayed, optional)
|
||||
missing module named 'pandas.api.internals' - imported by pyarrow.pandas_compat (delayed, conditional)
|
||||
missing module named 'pyarrow._cuda' - imported by pyarrow.cuda (top-level)
|
||||
missing module named 'pyarrow.gandiva' - imported by pyarrow.conftest (optional)
|
||||
missing module named 'pyarrow._azurefs' - imported by pyarrow.fs (optional)
|
||||
missing module named 'setuptools_scm.git' - imported by pyarrow (delayed, optional)
|
||||
missing module named setuptools_scm - imported by matplotlib (delayed, conditional, optional), pyarrow (optional), tqdm.version (optional)
|
||||
missing module named fastparquet - imported by fsspec.parquet (delayed), pyarrow.conftest (optional)
|
||||
missing module named requests_kerberos - imported by fsspec.implementations.webhdfs (delayed, conditional)
|
||||
missing module named smbprotocol - imported by fsspec.implementations.smb (top-level)
|
||||
missing module named smbclient - imported by fsspec.implementations.smb (top-level)
|
||||
missing module named paramiko - imported by fsspec.implementations.sftp (top-level)
|
||||
missing module named kerchunk - imported by fsspec.implementations.reference (delayed)
|
||||
missing module named ujson - imported by fsspec.implementations.cache_metadata (optional), fsspec.implementations.reference (optional)
|
||||
missing module named 'libarchive.ffi' - imported by fsspec.implementations.libarchive (top-level)
|
||||
missing module named libarchive - imported by fsspec.implementations.libarchive (top-level)
|
||||
missing module named uvloop - imported by aiohttp.worker (delayed)
|
||||
missing module named annotationlib - imported by attr._compat (conditional)
|
||||
missing module named async_timeout - imported by aiohttp.helpers (conditional), aiohttp.web_ws (conditional), aiohttp.client_ws (conditional)
|
||||
missing module named 'gunicorn.workers' - imported by aiohttp.worker (top-level)
|
||||
missing module named gunicorn - imported by aiohttp.worker (top-level)
|
||||
missing module named aiodns - imported by aiohttp.resolver (optional)
|
||||
missing module named pygit2 - imported by fsspec.implementations.git (top-level)
|
||||
missing module named 'distributed.worker' - imported by fsspec.implementations.dask (top-level)
|
||||
missing module named 'distributed.client' - imported by fsspec.implementations.dask (top-level)
|
||||
missing module named dask - imported by joblib._dask (optional), sklearn.externals.array_api_extra._lib._lazy (delayed, conditional), narwhals._polars.dataframe (delayed, conditional), narwhals._pandas_like.dataframe (delayed, conditional), narwhals._arrow.dataframe (delayed, conditional), fsspec.implementations.dask (top-level), skimage.restoration._cycle_spin (optional)
|
||||
missing module named panel - imported by fsspec.gui (top-level)
|
||||
missing module named fuse - imported by fsspec.fuse (top-level)
|
||||
missing module named lz4 - imported by fsspec.compression (optional), joblib.compressor (optional)
|
||||
missing module named snappy - imported by fsspec.compression (delayed, optional)
|
||||
missing module named lzmaffi - imported by fsspec.compression (optional)
|
||||
missing module named isal - imported by fsspec.compression (optional)
|
||||
missing module named 'IPython.display' - imported by tqdm.notebook (conditional, optional), rich.jupyter (delayed, optional), rich.live (delayed, conditional, optional), huggingface_hub._login (delayed, optional), pydeck.io.html (delayed), altair.vegalite.v5.display (delayed), altair.vegalite.v5.api (delayed, conditional)
|
||||
missing module named 'IPython.html' - imported by tqdm.notebook (conditional, optional)
|
||||
missing module named ipywidgets - imported by tqdm.notebook (conditional, optional), rich.live (delayed, conditional, optional), plotly.graph_objects (delayed, conditional, optional), plotly.graph_objs (delayed, conditional, optional), pydeck.widget.widget (top-level)
|
||||
missing module named boto3 - imported by tensorboard.compat.tensorflow_stub.io.gfile (optional)
|
||||
missing module named 'botocore.exceptions' - imported by tensorboard.compat.tensorflow_stub.io.gfile (optional)
|
||||
missing module named tensorboard.compat.notf - imported by tensorboard.compat (delayed, optional)
|
||||
missing module named 'tensorflow.compat' - imported by keras.src.callbacks.tensorboard (delayed), tensorboard.util.op_evaluator (delayed), tensorboard.util.encoder (delayed), tensorboard.plugins.audio.summary (delayed), tensorboard.plugins.custom_scalar.summary (delayed), tensorboard.plugins.histogram.summary (delayed), tensorboard.plugins.image.summary (delayed), tensorboard.plugins.pr_curve.summary (delayed), tensorboard.plugins.scalar.summary (delayed), tensorboard.plugins.text.summary (delayed)
|
||||
missing module named 'keras.optimizers.optimizer_v2' - imported by tensorflow.python.saved_model.load (delayed, conditional, optional)
|
||||
missing module named triton - imported by torch._utils_internal (delayed, conditional), torch._dynamo.logging (conditional, optional), torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed), torch._inductor.runtime.autotune_cache (conditional), torch._inductor.runtime.coordinate_descent_tuner (optional), torch._inductor.runtime.triton_heuristics (conditional, optional), torch._inductor.codegen.wrapper (delayed, conditional), torch._inductor.kernel.mm_common (delayed), torch._inductor.kernel.mm_plus_mm (delayed), torch.sparse._triton_ops_meta (delayed, conditional), torch.sparse._triton_ops (conditional), torch._dynamo.utils (conditional), torch._inductor.compile_worker.__main__ (optional), torch._inductor.runtime.triton_helpers (top-level), torch.testing._internal.triton_utils (conditional)
|
||||
missing module named 'torch._C._distributed_c10d' - imported by torch.distributed (conditional), torch.distributed.distributed_c10d (top-level), torch.distributed.constants (top-level), torch.distributed.rpc (conditional), torch.distributed.tensor._collective_utils (top-level), torch.distributed._shard.sharded_tensor.reshard (top-level), torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding_bag (top-level), torch.testing._internal.distributed.fake_pg (top-level), torch._dynamo.variables.distributed (delayed), torch.distributed._symmetric_memory (top-level), torch.distributed.elastic.control_plane (delayed), torch.testing._internal.distributed.multi_threaded_pg (top-level)
|
||||
missing module named torch.randperm - imported by torch (top-level), torch.utils.data.dataset (top-level)
|
||||
missing module named torch.Generator - imported by torch (top-level), torch.utils.data.dataset (top-level)
|
||||
missing module named torch.default_generator - imported by torch (top-level), torch.utils.data.dataset (top-level)
|
||||
missing module named soundfile - imported by torchaudio._backend.soundfile_backend (conditional, optional)
|
||||
missing module named torch.norm_except_dim - imported by torch (top-level), torch.nn.utils.weight_norm (top-level)
|
||||
missing module named torch._weight_norm - imported by torch (top-level), torch.nn.utils.weight_norm (top-level)
|
||||
missing module named 'triton.language' - imported by torch._inductor.codegen.triton_split_scan (delayed), torch._inductor.codegen.wrapper (delayed), torch.sparse._triton_ops (conditional), torch._inductor.runtime.triton_helpers (top-level), torch.testing._internal.triton_utils (conditional)
|
||||
missing module named 'triton.runtime' - imported by torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed), torch._inductor.runtime.triton_heuristics (conditional), torch._library.triton (delayed), torch._inductor.select_algorithm (delayed, optional), torch._inductor.ir (delayed), torch._dynamo.variables.builder (delayed, conditional), torch._inductor.fx_passes.reinplace (delayed, conditional), torch._inductor.utils (delayed)
|
||||
missing module named 'triton.compiler' - imported by torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed, optional), torch._inductor.runtime.hints (optional), torch._inductor.runtime.triton_heuristics (conditional, optional), torch._inductor.scheduler (delayed), torch._inductor.codegen.triton (delayed), torch._inductor.codecache (delayed, optional), torch._inductor.async_compile (delayed, optional)
|
||||
missing module named dl - imported by setuptools.command.build_ext (conditional, optional)
|
||||
missing module named 'Cython.Distutils' - imported by setuptools.command.build_ext (conditional, optional)
|
||||
missing module named 'win32com.shell' - imported by torch._appdirs (conditional, optional)
|
||||
missing module named 'com.sun' - imported by torch._appdirs (delayed, conditional, optional)
|
||||
missing module named com - imported by torch._appdirs (delayed)
|
||||
missing module named win32api - imported by torch._appdirs (delayed, conditional, optional)
|
||||
missing module named win32com - imported by torch._appdirs (delayed)
|
||||
missing module named halide - imported by torch._inductor.codecache (delayed, conditional), torch._inductor.runtime.halide_helpers (optional)
|
||||
missing module named gmpy2.qdiv - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
|
||||
missing module named gmpy2.lcm - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
|
||||
missing module named gmpy2.gcd - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
|
||||
missing module named gmpy2.gcdext - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
|
||||
missing module named gmpy2.denom - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
|
||||
missing module named gmpy2.numer - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
|
||||
missing module named gmpy2.mpq - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
|
||||
missing module named gmpy2.mpz - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
|
||||
missing module named 'pyglet.image' - imported by sympy.printing.preview (delayed, optional)
|
||||
missing module named 'pyglet.window' - imported by sympy.plotting.pygletplot.managed_window (top-level), sympy.plotting.pygletplot.plot_controller (top-level), sympy.printing.preview (delayed, optional)
|
||||
missing module named pyglet - imported by sympy.plotting.pygletplot.plot (optional), sympy.plotting.pygletplot.plot_axes (top-level), sympy.printing.preview (delayed, conditional, optional), sympy.testing.runtests (delayed, conditional)
|
||||
missing module named 'pyglet.gl' - imported by sympy.plotting.pygletplot.plot_axes (top-level), sympy.plotting.pygletplot.util (top-level), sympy.plotting.pygletplot.plot_window (top-level), sympy.plotting.pygletplot.plot_camera (top-level), sympy.plotting.pygletplot.plot_rotation (top-level), sympy.plotting.pygletplot.plot_curve (top-level), sympy.plotting.pygletplot.plot_mode_base (top-level), sympy.plotting.pygletplot.plot_surface (top-level)
|
||||
missing module named 'pyglet.clock' - imported by sympy.plotting.pygletplot.managed_window (top-level)
|
||||
missing module named 'sage.libs' - imported by mpmath.libmp.backend (conditional, optional), mpmath.libmp.libelefun (conditional, optional), mpmath.libmp.libmpf (conditional, optional), mpmath.libmp.libmpc (conditional, optional), mpmath.libmp.libhyper (delayed, conditional), mpmath.ctx_mp (conditional)
|
||||
missing module named sage - imported by mpmath.libmp.backend (conditional, optional)
|
||||
missing module named gmpy - imported by mpmath.libmp.backend (conditional, optional)
|
||||
missing module named pysat - imported by sympy.logic.algorithms.minisat22_wrapper (delayed)
|
||||
missing module named pycosat - imported by sympy.logic.algorithms.pycosat_wrapper (delayed)
|
||||
missing module named flint - imported by sympy.external.gmpy (delayed, optional), sympy.polys.polyutils (conditional), sympy.polys.factortools (conditional), sympy.polys.polyclasses (conditional), sympy.polys.domains.groundtypes (conditional), sympy.polys.domains.finitefield (conditional)
|
||||
missing module named all - imported by sympy.testing.runtests (delayed, optional)
|
||||
missing module named 'IPython.Shell' - imported by sympy.interactive.session (delayed, conditional)
|
||||
missing module named 'IPython.frontend' - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional)
|
||||
missing module named 'IPython.terminal' - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional)
|
||||
missing module named 'IPython.iplib' - imported by sympy.interactive.printing (delayed, optional)
|
||||
missing module named py - imported by mpmath.tests.runtests (delayed, conditional)
|
||||
missing module named 'sage.all' - imported by sympy.core.function (delayed)
|
||||
missing module named 'sage.interfaces' - imported by sympy.core.basic (delayed)
|
||||
missing module named 'cutlass_library.gemm_operation' - imported by torch._inductor.codegen.cuda.gemm_template (delayed), torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions (conditional)
|
||||
missing module named 'cutlass_library.library' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional), torch._inductor.codegen.cuda.gemm_template (delayed), torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions (conditional)
|
||||
missing module named 'cutlass_library.generator' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed)
|
||||
missing module named 'cutlass_library.manifest' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional)
|
||||
missing module named cutlass_library - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional)
|
||||
missing module named torch.multiprocessing._prctl_pr_set_pdeathsig - imported by torch.multiprocessing (top-level), torch.multiprocessing.spawn (top-level)
|
||||
missing module named 'torch.utils._config_typing' - imported by torch._dynamo.config (conditional), torch._inductor.config (conditional), torch._functorch.config (conditional)
|
||||
missing module named 'torch._C._functorch' - imported by torch._subclasses.fake_tensor (top-level), torch._subclasses.meta_utils (top-level), torch._functorch.pyfunctorch (top-level), torch._higher_order_ops.cond (top-level), torch._functorch.autograd_function (top-level), torch._functorch.utils (top-level), torch._functorch.vmap (top-level), torch._functorch.eager_transforms (top-level)
|
||||
missing module named torch.trunc - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.tanh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.tan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.square - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.sqrt - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.sinh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.sin - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.signbit - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.sign - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.round - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.reciprocal - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.rad2deg - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.negative - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.logical_not - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.log2 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.log1p - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.log10 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.log - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.isnan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.isinf - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.isfinite - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.floor - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.expm1 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.exp2 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.exp - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.deg2rad - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.cosh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.cos - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.conj_physical - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.ceil - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_not - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arctanh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arctan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arcsinh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arcsin - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arccosh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.arccos - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.absolute - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
|
||||
missing module named torch.true_divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.subtract - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.remainder - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.pow - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.not_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.nextafter - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.multiply - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.minimum - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.maximum - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.logical_xor - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.logical_or - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.logical_and - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.logaddexp2 - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.logaddexp - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.less_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.less - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.ldexp - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.lcm - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.hypot - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.heaviside - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.greater_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.greater - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.gcd - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.fmod - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.fmin - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.fmax - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.floor_divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.float_power - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.eq - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.copysign - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_xor - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_right_shift - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_or - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_left_shift - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.bitwise_and - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.arctan2 - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch.add - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
|
||||
missing module named torch_xla - imported by torch._functorch.fx_minifier (delayed), huggingface_hub.serialization._torch (delayed, conditional)
|
||||
missing module named deeplearning - imported by torch._inductor.fx_passes.group_batch_fusion (optional)
|
||||
missing module named torch._inductor.fx_passes.fb - imported by torch._inductor.fx_passes (delayed, conditional), torch._inductor.fx_passes.pre_grad (delayed, conditional)
|
||||
missing module named 'torch_xla.distributed' - imported by torch.distributed.tensor._api (delayed, conditional, optional)
|
||||
missing module named torchdistx - imported by torch.distributed.fsdp._init_utils (optional)
|
||||
missing module named 'torch._C._distributed_rpc' - imported by torch.distributed.rpc (conditional), torch.distributed.rpc.api (top-level), torch.distributed.rpc.constants (top-level), torch.distributed.rpc.internal (top-level), torch.distributed.rpc.options (top-level), torch._jit_internal (conditional)
|
||||
missing module named foo - imported by torch._functorch.compilers (delayed)
|
||||
missing module named torch.broadcast_shapes - imported by torch (top-level), torch._numpy._funcs_impl (top-level)
|
||||
missing module named torch._numpy.float_ - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.max - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.isnan - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.signbit - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.real - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.isscalar - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.iscomplexobj - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.imag - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
|
||||
missing module named torch._numpy.intp - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
|
||||
missing module named torch._numpy.empty - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
|
||||
missing module named torch._numpy.arange - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
|
||||
missing module named 'onnxscript._framework_apis' - imported by torch.onnx._internal._exporter_legacy (delayed), torch.onnx._internal._lazy_import (conditional)
|
||||
missing module named onnxscript - imported by torch.onnx._internal.fx.registration (conditional), torch.onnx._internal._exporter_legacy (delayed, conditional, optional), torch.onnx._internal.fx.diagnostics (top-level), torch.onnx._internal.fx.onnxfunction_dispatcher (conditional), torch.onnx._internal.fx.fx_onnx_interpreter (top-level), torch.onnx._internal.onnxruntime (delayed, conditional, optional), torch.onnx._internal._lazy_import (conditional), torch.onnx._internal.exporter._core (top-level), torch.onnx._internal.exporter._dispatching (top-level), torch.onnx._internal.exporter._schemas (top-level), torch.onnx._internal.exporter._registration (conditional), torch.onnx._internal.exporter._building (top-level), torch.onnx._internal.exporter._tensors (top-level), torch.onnx._internal.exporter._ir_passes (top-level), torch.onnx._internal.exporter._reporting (conditional)
|
||||
missing module named 'onnx.onnx_cpp2py_export.defs' - imported by onnx.defs (top-level), onnx.reference.ops._op_list (top-level)
|
||||
missing module named 'onnx.onnx_cpp2py_export.version_converter' - imported by onnx.version_converter (top-level)
|
||||
missing module named 'onnx.onnx_cpp2py_export.shape_inference' - imported by onnx.shape_inference (top-level)
|
||||
missing module named 'onnx.onnx_cpp2py_export.printer' - imported by onnx.printer (top-level)
|
||||
missing module named 'onnx.onnx_cpp2py_export.parser' - imported by onnx.parser (top-level)
|
||||
missing module named 'onnx.onnx_cpp2py_export.checker' - imported by onnx.checker (top-level)
|
||||
missing module named pyinstrument - imported by torch.onnx._internal.exporter._core (delayed, conditional)
|
||||
missing module named 'onnxscript.ir' - imported by torch.onnx._internal.exporter._core (top-level), torch.onnx._internal.exporter._building (top-level)
|
||||
missing module named 'onnxscript.evaluator' - imported by torch.onnx._internal.exporter._core (top-level)
|
||||
missing module named onnxruntime.capi.build_and_package_info - imported by onnxruntime.capi.onnxruntime_validation (delayed, conditional, optional)
|
||||
missing module named 'onnxruntime.training' - imported by onnxruntime.capi.onnxruntime_validation (delayed, optional)
|
||||
missing module named 'onnxscript.function_libs' - imported by torch.onnx._internal.fx.diagnostics (top-level), torch.onnx._internal.fx.onnxfunction_dispatcher (conditional), torch.onnx._internal.fx.decomposition_skip (top-level), torch.onnx._internal.fx.fx_onnx_interpreter (top-level), torch.onnx._internal.exporter._ir_passes (delayed, optional)
|
||||
missing module named 'onnx.defs.OpSchema' - imported by torch.onnx._internal.fx.type_utils (conditional)
|
||||
missing module named transformers - imported by torch.onnx._internal.fx.patcher (delayed, conditional, optional), torch.onnx._internal.fx.dynamo_graph_extractor (delayed, optional), nncf.data.generators (delayed, optional), torch._dynamo.variables.dicts (delayed), torch.testing._internal.common_distributed (delayed, optional)
|
||||
missing module named accimage - imported by torchvision.transforms.transforms (optional), torchvision.transforms.functional (optional), torchvision.transforms._functional_pil (optional), torchvision.datasets.folder (delayed)
|
||||
missing module named torch.ao.quantization.QuantStub - imported by torch.ao.quantization (top-level), torchvision.models.quantization.mobilenetv2 (top-level), torchvision.models.quantization.mobilenetv3 (top-level), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named torch.ao.quantization.DeQuantStub - imported by torch.ao.quantization (top-level), torchvision.models.quantization.mobilenetv2 (top-level), torchvision.models.quantization.mobilenetv3 (top-level), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named 'monkeytype.tracing' - imported by torch.jit._monkeytype_config (optional)
|
||||
missing module named 'monkeytype.db' - imported by torch.jit._monkeytype_config (optional)
|
||||
missing module named 'monkeytype.config' - imported by torch.jit._monkeytype_config (optional)
|
||||
missing module named monkeytype - imported by torch.jit._monkeytype_config (optional)
|
||||
missing module named 'torch._C._jit_tree_views' - imported by torch._sources (top-level), torch.jit.frontend (top-level)
|
||||
missing module named wcwidth - imported by tabulate (optional)
|
||||
missing module named torch.ao.quantization.QConfig - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.qconfig_mapping_utils (top-level), torch.ao.quantization.fx.lstm_utils (top-level), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named torch.ao.quantization.QConfigMapping - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.custom_config (top-level), torch.ao.ns.fx.n_shadows_utils (top-level), torch.ao.ns.fx.qconfig_multi_mapping (top-level), torch.ao.ns._numeric_suite_fx (top-level), torch.ao.quantization.fx.lstm_utils (top-level), torch.ao.quantization.pt2e.prepare (top-level), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named torch.ao.quantization.QuantType - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.utils (top-level), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named torch.ao.quantization.QConfigAny - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.utils (top-level)
|
||||
missing module named torch.ao.quantization.float_qparams_weight_only_qconfig - imported by torch.ao.quantization (delayed, conditional), torch.ao.nn.quantized.modules.embedding_ops (delayed, conditional), torch.testing._internal.common_quantization (top-level)
|
||||
missing module named pycocotools - imported by torchvision.datasets.coco (delayed), torchvision.tv_tensors._dataset_wrapper (delayed)
|
||||
missing module named gdown - imported by torchvision.datasets.utils (delayed, optional)
|
||||
missing module named 'IPython.utils' - imported by h5py.ipy_completer (top-level)
|
||||
missing module named mpi4py - imported by h5py._hl.files (delayed)
|
||||
missing module named lmdb - imported by torchvision.datasets.lsun (delayed)
|
||||
missing module named 'onnxscript.rewriter' - imported by torch.onnx._internal.onnxruntime (delayed, conditional, optional)
|
||||
missing module named 'torch._C._onnx' - imported by torch.onnx (top-level), torch.onnx.utils (top-level), torch.onnx.symbolic_helper (top-level), torch.onnx._globals (top-level), torch.onnx.symbolic_opset9 (top-level), torch.onnx.symbolic_opset10 (top-level), torch.onnx.symbolic_opset13 (top-level), torch.onnx._experimental (top-level), torch.onnx.verification (top-level)
|
||||
missing module named torchrec - imported by torch._dynamo.variables.user_defined (delayed)
|
||||
missing module named 'torch._C._lazy_ts_backend' - imported by torch._lazy.ts_backend (top-level), torch._lazy.computation (top-level)
|
||||
missing module named 'torch._C._lazy' - imported by torch._lazy (top-level), torch._lazy.device_context (top-level), torch._lazy.metrics (top-level), torch._lazy.computation (top-level), torch._lazy.config (top-level), torch._lazy.debug (top-level), torch._lazy.ir_cache (top-level)
|
||||
missing module named hypothesis - imported by torch.testing._internal.common_utils (optional), torch.testing._internal.hypothesis_utils (top-level)
|
||||
missing module named 'numba.cuda' - imported by torch.testing._internal.common_cuda (conditional, optional)
|
||||
missing module named 'xmlrunner.result' - imported by torch.testing._internal.common_utils (delayed, conditional)
|
||||
missing module named xmlrunner - imported by torch.testing._internal.common_utils (delayed, conditional)
|
||||
missing module named expecttest - imported by torch.testing._internal.common_utils (top-level)
|
||||
missing module named '_pytest.recwarn' - imported by torch._dynamo.variables.user_defined (delayed, optional)
|
||||
missing module named _pytest - imported by torch._dynamo.variables.user_defined (delayed, optional)
|
||||
missing module named 'torch._C._dynamo' - imported by torch._guards (top-level), torch._dynamo.convert_frame (top-level), torch._dynamo.guards (top-level), torch._dynamo.eval_frame (top-level), torch._dynamo.decorators (conditional), torch._dynamo.types (top-level)
|
||||
missing module named pygraphviz - imported by networkx.drawing.nx_agraph (delayed, optional)
|
||||
missing module named 'triton.backends' - imported by torch._inductor.runtime.triton_heuristics (conditional, optional)
|
||||
missing module named 'triton.testing' - imported by torch._inductor.runtime.benchmarking (delayed, optional), torch._inductor.utils (delayed)
|
||||
missing module named 'torch_xla.core' - imported by huggingface_hub.serialization._torch (delayed, conditional, optional), torch._dynamo.testing (delayed, conditional), torch._dynamo.backends.torchxla (delayed, optional)
|
||||
missing module named torch.float16 - imported by torch (delayed, conditional), torch._inductor.codegen.cpp_wrapper_cuda (delayed, conditional)
|
||||
missing module named torch.bfloat16 - imported by torch (delayed, conditional), torch._inductor.codegen.cpp_wrapper_cuda (delayed, conditional)
|
||||
missing module named torch.ScriptObject - imported by torch (delayed), torch.export.graph_signature (delayed)
|
||||
missing module named moviepy - imported by torch.utils.tensorboard.summary (delayed, optional)
|
||||
missing module named 'torch._C._monitor' - imported by torch.monitor (top-level)
|
||||
missing module named 'libfb.py' - imported by torch._dynamo.debug_utils (conditional), torch._inductor.codecache (delayed, conditional), torch._inductor.compile_worker.subproc_pool (delayed, conditional)
|
||||
missing module named 'torch._inductor.fb' - imported by torch._inductor.runtime.autotune_cache (delayed, conditional, optional), torch._inductor.cpp_builder (conditional), torch._inductor.graph (conditional), torch._inductor.codecache (delayed, conditional, optional), torch._inductor.compile_fx (delayed, conditional, optional)
|
||||
missing module named 'triton.fb' - imported by torch._inductor.cpp_builder (conditional), torch._inductor.codecache (conditional)
|
||||
missing module named rfe - imported by torch._inductor.remote_cache (conditional)
|
||||
missing module named redis - imported by torch._inductor.remote_cache (optional)
|
||||
missing module named 'ck4inductor.universal_gemm' - imported by torch._inductor.utils (delayed, optional)
|
||||
missing module named ck4inductor - imported by torch._inductor.utils (delayed, optional)
|
||||
missing module named libfb - imported by torch._inductor.config (conditional, optional)
|
||||
missing module named amdsmi - imported by torch.cuda (conditional, optional), torch.cuda.memory (delayed, conditional, optional)
|
||||
missing module named pynvml - imported by torch.cuda (delayed, conditional, optional), torch.cuda.memory (delayed, conditional, optional)
|
||||
missing module named torch.device - imported by torch (top-level), torch.types (top-level), torch.nn.modules.module (top-level), torch.cuda (top-level), torch._library.infer_schema (top-level), torch._inductor.graph (top-level), torch.distributed.nn.api.remote_module (top-level), torch.xpu (top-level), torch.cpu (top-level), torch.mtia (top-level)
|
||||
missing module named 'torch._C._profiler' - imported by torch.utils._traceback (delayed), torch.profiler (top-level), torch.autograd.profiler (top-level), torch.profiler.profiler (top-level), torch.profiler._memory_profiler (top-level), torch.cuda._memory_viz (delayed), torch.testing._internal.logging_tensor (top-level), torch.autograd (top-level), torch.profiler._pattern_matcher (top-level)
|
||||
missing module named 'torch._C._autograd' - imported by torch._subclasses.meta_utils (top-level), torch.profiler (top-level), torch.profiler._memory_profiler (top-level), torch.autograd (top-level)
|
||||
missing module named z3 - imported by torch.fx.experimental.validator (optional), torch.fx.experimental.migrate_gradual_types.transform_to_z3 (optional), torch.fx.experimental.migrate_gradual_types.z3_types (optional)
|
||||
missing module named torch.Size - imported by torch (top-level), torch.types (top-level), torch.nn.modules.normalization (top-level)
|
||||
missing module named torch.nn.Sequential - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ParameterList - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ParameterDict - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ModuleList - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ModuleDict - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
|
||||
missing module named torch.nn.ReLU - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Linear - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Conv3d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Conv2d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Conv1d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.BatchNorm3d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.BatchNorm2d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.BatchNorm1d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
|
||||
missing module named torch.nn.Module - imported by torch.nn (top-level), torch.optim.swa_utils (top-level), torch.ao.quantization.fake_quantize (top-level), torch.jit._recursive (top-level), torch.jit._script (top-level), torch.jit._trace (top-level), torch._dynamo.mutation_guard (top-level), torch.fx.passes.utils.common (top-level), torch.distributed.nn.api.remote_module (top-level), torchaudio.models.wav2vec2.utils.import_fairseq (top-level), torchaudio.models.wav2vec2.model (top-level), torchaudio.models.wav2vec2.components (top-level), torchaudio.models.wav2vec2.utils.import_huggingface (top-level), torchaudio.pipelines._wav2vec2.impl (top-level), nncf.torch.utils (top-level), nncf.torch.debug (top-level), nncf.common.factory (delayed, conditional), nncf.torch.model_creation (top-level), torch.fx.experimental.proxy_tensor (top-level)
|
||||
missing module named torch.qscheme - imported by torch (top-level), torch.types (top-level)
|
||||
missing module named torch.layout - imported by torch (top-level), torch.types (top-level)
|
||||
missing module named torch.DispatchKey - imported by torch (top-level), torch.types (top-level)
|
||||
missing module named torchaudio._internal.fb - imported by torchaudio._internal (optional)
|
||||
missing module named sentencepiece - imported by torchaudio.pipelines.rnnt_pipeline (delayed)
|
||||
missing module named dp - imported by torchaudio.pipelines._tts.utils (delayed)
|
||||
missing module named kaldi_io - imported by torchaudio.kaldi_io (delayed)
|
||||
missing module named av.video._VideoCodecName - imported by av.video (top-level), av.codec.context (top-level), av.container.output (top-level)
|
||||
missing module named av.audio._AudioCodecName - imported by av.audio (top-level), av.codec.context (top-level), av.container.output (top-level)
|
||||
missing module named torcharrow - imported by torch.utils.data.datapipes.iter.callable (delayed, conditional, optional)
|
||||
missing module named _dbm - imported by dbm.ndbm (top-level)
|
||||
missing module named _gdbm - imported by dbm.gnu (top-level)
|
||||
missing module named diff - imported by dill._dill (delayed, conditional, optional)
|
||||
missing module named dill.diff - imported by dill (delayed, conditional, optional), dill._dill (delayed, conditional, optional)
|
||||
missing module named version - imported by dill (optional)
|
||||
missing module named 'jax.typing' - imported by optree.integrations.jax (top-level)
|
||||
missing module named 'jax._src' - imported by optree.integrations.jax (top-level), keras.src.backend.jax.nn (delayed, optional)
|
||||
missing module named 'torch._C._distributed_autograd' - imported by torch.distributed.autograd (conditional)
|
||||
missing module named 'einops._torch_specific' - imported by torch._dynamo.decorators (delayed, optional)
|
||||
missing module named einops - imported by torch._dynamo.decorators (delayed)
|
||||
missing module named 'tensorflow.saved_model' - imported by keras.src.export.saved_model (delayed)
|
||||
missing module named keras.src.backend.random_seed_dtype - imported by keras.src.backend (delayed), keras.src.random.seed_generator (delayed)
|
||||
missing module named keras.src.backend.convert_to_tensor - imported by keras.src.backend (delayed), keras.src.random.seed_generator (delayed)
|
||||
missing module named 'openvino._pyopenvino.util' - imported by openvino.utils (delayed), openvino.runtime.utils (top-level)
|
||||
missing module named 'openvino._pyopenvino.op' - imported by openvino.runtime.op (top-level), openvino.runtime.op.util (top-level), nncf.openvino.optimized_functions.models (top-level)
|
||||
missing module named 'jax.nn' - imported by keras.src.backend.jax.nn (delayed, optional)
|
||||
missing module named 'jax.scipy' - imported by keras.src.backend.jax.linalg (top-level)
|
||||
missing module named 'tensorflow.experimental' - imported by keras.src.backend.tensorflow.distribution_lib (top-level)
|
||||
missing module named 'tensorflow.summary' - imported by keras.src.callbacks.tensorboard (delayed, conditional)
|
||||
missing module named pygments.lexers.PrologLexer - imported by pygments.lexers (top-level), pygments.lexers.cplint (top-level)
|
||||
missing module named ctags - imported by pygments.formatters.html (optional)
|
||||
missing module named linkify_it - imported by markdown_it.main (optional)
|
||||
missing module named pydantic - imported by huggingface_hub.utils._runtime (delayed, optional), huggingface_hub._webhooks_payload (conditional)
|
||||
missing module named 'google.colab' - imported by huggingface_hub.utils._auth (delayed, optional), plotly.io._renderers (conditional, optional)
|
||||
missing module named hf_transfer - imported by huggingface_hub.file_download (delayed, conditional, optional), huggingface_hub.lfs (delayed, optional)
|
||||
missing module named hf_xet - imported by huggingface_hub.file_download (delayed, optional), huggingface_hub._commit_api (delayed)
|
||||
missing module named 'mcp.client' - imported by huggingface_hub.inference._mcp.mcp_client (delayed, conditional)
|
||||
missing module named mcp - imported by huggingface_hub.inference._mcp.utils (conditional), huggingface_hub.inference._mcp.mcp_client (delayed, conditional)
|
||||
missing module named fastai - imported by huggingface_hub.fastai_utils (delayed)
|
||||
missing module named 'fastapi.responses' - imported by huggingface_hub._oauth (delayed, optional), huggingface_hub._webhooks_server (conditional)
|
||||
missing module named fastapi - imported by huggingface_hub._oauth (delayed, conditional, optional), huggingface_hub._webhooks_server (conditional)
|
||||
missing module named gradio - imported by huggingface_hub._webhooks_server (delayed, conditional)
|
||||
missing module named tensorboardX - imported by huggingface_hub._tensorboard_logger (conditional, optional)
|
||||
missing module named 'starlette.datastructures' - imported by huggingface_hub._oauth (delayed, optional)
|
||||
missing module named 'authlib.integrations' - imported by huggingface_hub._oauth (delayed, optional)
|
||||
missing module named authlib - imported by huggingface_hub._oauth (delayed, optional), streamlit.auth_util (delayed, optional)
|
||||
missing module named starlette - imported by huggingface_hub._oauth (delayed, optional)
|
||||
missing module named 'ipywidgets.widgets' - imported by huggingface_hub._login (delayed, optional)
|
||||
missing module named 'InquirerPy.separator' - imported by huggingface_hub.commands.delete_cache (optional)
|
||||
missing module named 'InquirerPy.base' - imported by huggingface_hub.commands.delete_cache (optional)
|
||||
missing module named InquirerPy - imported by huggingface_hub.commands.delete_cache (optional)
|
||||
missing module named pydotplus - imported by keras.src.utils.model_visualization (optional), tensorflow.python.keras.utils.vis_utils (optional)
|
||||
missing module named pydot_ng - imported by keras.src.utils.model_visualization (optional), tensorflow.python.keras.utils.vis_utils (optional)
|
||||
missing module named keras.src.ops.convert_to_tensor - imported by keras.src.ops (top-level), keras.src.utils.torch_utils (top-level)
|
||||
missing module named keras.src.ops.convert_to_numpy - imported by keras.src.ops (top-level), keras.src.utils.torch_utils (top-level)
|
||||
missing module named keras.src.backend.random - imported by keras.src.backend (top-level), keras.src.ops (top-level), keras.src.testing.test_case (delayed), keras.src.initializers.random_initializers (top-level)
|
||||
missing module named keras.src.backend.is_tensor - imported by keras.src.backend (top-level), keras.src.ops (top-level)
|
||||
missing module named keras.src.backend.cond - imported by keras.src.backend (top-level), keras.src.ops (top-level)
|
||||
missing module named keras.src.backend.cast - imported by keras.src.backend (top-level), keras.src.ops (top-level)
|
||||
missing module named keras.src.engine - imported by keras.src (conditional), nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named flax - imported by keras.src.utils.jax_layer (delayed)
|
||||
missing module named array_api_strict - imported by sklearn.utils._array_api (delayed, conditional, optional)
|
||||
missing module named sklearn.externals.array_api_compat.common.array_namespace - imported by sklearn.externals.array_api_compat.common (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
|
||||
missing module named cupy_backends - imported by sklearn.externals.array_api_compat.common._helpers (delayed)
|
||||
missing module named torch.outer - imported by torch (top-level), sklearn.externals.array_api_compat.torch.linalg (top-level)
|
||||
missing module named 'cupy.linalg' - imported by sklearn.externals.array_api_compat.cupy.linalg (top-level)
|
||||
missing module named 'cupy.fft' - imported by sklearn.externals.array_api_compat.cupy.fft (top-level)
|
||||
missing module named array_api_compat - imported by sklearn.externals.array_api_extra._lib._utils._compat (optional)
|
||||
missing module named 'numpydoc.docscrape' - imported by sklearn.utils._testing (delayed), skimage._shared.utils (delayed, optional)
|
||||
missing module named numpydoc - imported by sklearn.utils._testing (delayed, optional)
|
||||
missing module named 'distributed.utils' - imported by joblib._dask (conditional, optional)
|
||||
missing module named 'dask.utils' - imported by joblib._dask (conditional)
|
||||
missing module named 'dask.sizeof' - imported by joblib._dask (conditional)
|
||||
missing module named 'dask.distributed' - imported by joblib._dask (conditional)
|
||||
missing module named viztracer - imported by joblib.externals.loky.initializers (delayed, optional)
|
||||
missing module named 'lz4.frame' - imported by joblib.compressor (optional)
|
||||
missing module named pyamg - imported by sklearn.manifold._spectral_embedding (delayed, conditional, optional)
|
||||
missing module named keras.engine - imported by keras (conditional), nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'tf_keras.optimizers' - imported by tensorflow.python.saved_model.load (delayed, conditional, optional)
|
||||
missing module named tf_keras - imported by tensorflow.python.util.lazy_loader (delayed, conditional, optional), tensorflow.python.saved_model.load (delayed, conditional, optional), huggingface_hub.keras_mixin (conditional, optional)
|
||||
missing module named objgraph - imported by tensorflow.python.distribute.test_util (optional)
|
||||
missing module named tblib - imported by tensorflow.python.distribute.multi_process_runner (optional)
|
||||
missing module named tensorflow.python.framework.fast_tensor_util - imported by tensorflow.python.framework (optional), tensorflow.python.framework.tensor_util (optional)
|
||||
missing module named portpicker - imported by tensorflow.python.framework.test_util (delayed), tensorflow.dtensor.python.tests.multi_client_test_util (top-level), tensorflow.python.debug.lib.grpc_debug_test_server (top-level)
|
||||
missing module named 'tensorflow.python.framework.is_mlir_bridge_test_true' - imported by tensorflow.python.framework.test_util (optional)
|
||||
missing module named 'tensorflow.python.framework.is_mlir_bridge_test_false' - imported by tensorflow.python.framework.test_util (optional)
|
||||
missing module named 'tensorflow.python.framework.is_xla_test_true' - imported by tensorflow.python.framework.test_util (optional)
|
||||
missing module named tensorflow.python.keras.__version__ - imported by tensorflow.python.keras (delayed), tensorflow.python.keras.saving.saving_utils (delayed), tensorflow.python.keras.saving.hdf5_format (delayed), tensorflow.python.keras.engine.training (delayed)
|
||||
missing module named tensorflow.python.keras.layers.wrappers - imported by tensorflow.python.keras.layers (delayed), tensorflow.python.keras.utils.vis_utils (delayed)
|
||||
missing module named 'six.moves.urllib.request' - imported by tensorflow.python.keras.utils.data_utils (top-level)
|
||||
missing module named 'tensorflow.python.training.tracking' - imported by openvino.frontend.tensorflow.utils (delayed, optional)
|
||||
missing module named paddle - imported by openvino.tools.ovc.moc_frontend.shape_utils (delayed, conditional), openvino.tools.ovc.moc_frontend.type_utils (delayed, conditional), openvino.tools.ovc.moc_frontend.paddle_frontend_utils (delayed, optional), openvino.tools.ovc.convert_impl (delayed, conditional)
|
||||
missing module named 'conda.cli' - imported by torch.utils.benchmark.examples.blas_compare_setup (optional)
|
||||
missing module named conda - imported by torch.utils.benchmark.examples.blas_compare_setup (optional)
|
||||
missing module named 'hypothesis.strategies' - imported by torch.testing._internal.hypothesis_utils (top-level)
|
||||
missing module named 'hypothesis.extra' - imported by torch.testing._internal.hypothesis_utils (top-level)
|
||||
missing module named torch.tensor - imported by torch (top-level), torch.utils.benchmark.utils.compare (top-level)
|
||||
missing module named torch.TensorType - imported by torch (top-level), torch.jit._passes._property_propagation (top-level)
|
||||
missing module named 'torch._C._distributed_rpc_testing' - imported by torch.distributed.rpc._testing (conditional)
|
||||
missing module named etcd - imported by torch.distributed.elastic.rendezvous.etcd_rendezvous (top-level), torch.distributed.elastic.rendezvous.etcd_store (top-level), torch.distributed.elastic.rendezvous.etcd_rendezvous_backend (top-level), torch.distributed.elastic.rendezvous.etcd_server (optional)
|
||||
missing module named 'torch.distributed.elastic.metrics.static_init' - imported by torch.distributed.elastic.metrics (optional)
|
||||
missing module named 'coremltools.models' - imported by torch.backends._coreml.preprocess (top-level)
|
||||
missing module named 'coremltools.converters' - imported by torch.backends._coreml.preprocess (top-level)
|
||||
missing module named coremltools - imported by torch.backends._coreml.preprocess (top-level)
|
||||
missing module named pytorch_lightning - imported by torch.ao.pruning._experimental.data_sparsifier.lightning.callbacks.data_sparsity (top-level)
|
||||
missing module named fbscribelogger - imported by torch._logging.scribe (optional)
|
||||
missing module named 'tvm.contrib' - imported by torch._dynamo.backends.tvm (delayed)
|
||||
missing module named tvm - imported by torch._dynamo.backends.tvm (delayed, conditional)
|
||||
missing module named 'torch._C._VariableFunctions' - imported by torch (conditional)
|
||||
missing module named 'tensorflow.contrib' - imported by tensorflow.python.tools.import_pb_to_tensorboard (optional)
|
||||
missing module named memory_profiler - imported by tensorflow.python.eager.memory_tests.memory_test_util (optional)
|
||||
missing module named six.moves.urllib.request - imported by six.moves.urllib (top-level), tensorflow.python.distribute.failure_handling.failure_handling_util (top-level)
|
||||
missing module named grpc_reflection - imported by grpc (optional)
|
||||
missing module named grpc_health - imported by grpc (optional)
|
||||
missing module named grpc_tools - imported by grpc._runtime_protos (delayed, optional), grpc (optional)
|
||||
missing module named 'grpc_tools.protoc' - imported by grpc._runtime_protos (delayed, conditional)
|
||||
missing module named tflite_runtime - imported by tensorflow.lite.python.metrics.metrics (conditional), tensorflow.lite.python.interpreter (conditional), tensorflow.lite.python.analyzer (conditional), tensorflow.lite.tools.visualize (conditional)
|
||||
missing module named awq - imported by openvino.frontend.pytorch.quantized (delayed, conditional, optional)
|
||||
missing module named 'transformers.pytorch_utils' - imported by openvino.frontend.pytorch.patch_model (delayed, optional)
|
||||
missing module named 'jax.lax' - imported by openvino.frontend.jax.passes (top-level)
|
||||
missing module named 'jax.core' - imported by openvino.frontend.jax.jaxpr_decoder (top-level)
|
||||
missing module named 'keras.src.utils.control_flow_util' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.src.engine.keras_tensor' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.utils.control_flow_util' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.engine.keras_tensor' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named rpds.List - imported by rpds (top-level), referencing._core (top-level)
|
||||
missing module named rpds.HashTrieSet - imported by rpds (top-level), referencing._core (top-level)
|
||||
missing module named rpds.HashTrieMap - imported by rpds (top-level), referencing._core (top-level), jsonschema._types (top-level), jsonschema.validators (top-level)
|
||||
missing module named isoduration - imported by jsonschema._format (top-level)
|
||||
missing module named uri_template - imported by jsonschema._format (top-level)
|
||||
missing module named jsonpointer - imported by jsonschema._format (top-level)
|
||||
missing module named webcolors - imported by jsonschema._format (top-level)
|
||||
missing module named rfc3339_validator - imported by jsonschema._format (top-level)
|
||||
missing module named rfc3986_validator - imported by jsonschema._format (optional)
|
||||
missing module named rfc3987 - imported by jsonschema._format (optional)
|
||||
missing module named fqdn - imported by jsonschema._format (top-level)
|
||||
missing module named openvino.properties.hint.inference_precision - imported by openvino.properties.hint (top-level), nncf.quantization.algorithms.accuracy_control.openvino_backend (top-level), nncf.openvino.engine (top-level)
|
||||
missing module named 'openvino._pyopenvino.properties' - imported by openvino.runtime.properties (top-level), openvino.runtime.properties.hint (top-level), openvino.properties (top-level), openvino.properties.hint (top-level), openvino.properties.intel_cpu (top-level), openvino.properties.intel_gpu (top-level), openvino.properties.intel_auto (top-level), openvino.properties.device (top-level), openvino.properties.log (top-level), openvino.properties.streams (top-level), nncf.openvino.optimized_functions.models (top-level)
|
||||
missing module named 'openvino._pyopenvino._offline_transformations' - imported by openvino._offline_transformations (top-level)
|
||||
missing module named 'transformers.utils' - imported by nncf.data.generators (delayed, optional)
|
||||
missing module named icu - imported by natsort.compat.locale (optional), natsort.natsort (conditional, optional)
|
||||
missing module named fastnumbers - imported by natsort.compat.fastnumbers (conditional, optional)
|
||||
missing module named 'openvino._pyopenvino.preprocess' - imported by openvino.preprocess (top-level)
|
||||
missing module named gitdb_speedups - imported by gitdb.fun (optional)
|
||||
missing module named 'gitdb_speedups._perf' - imported by gitdb.stream (optional), gitdb.pack (optional)
|
||||
missing module named sha - imported by gitdb.util (delayed, optional)
|
||||
missing module named _watchdog_fsevents - imported by watchdog.observers.fsevents (top-level)
|
||||
missing module named polars - imported by narwhals.dependencies (conditional), narwhals.utils (delayed, conditional), narwhals.schema (delayed, conditional), narwhals._compliant.series (conditional), narwhals._arrow.dataframe (delayed, conditional), narwhals._pandas_like.series (delayed, conditional), narwhals._pandas_like.dataframe (delayed, conditional), narwhals._polars.dataframe (top-level), narwhals._polars.namespace (top-level), narwhals._polars.expr (top-level), narwhals._polars.utils (top-level), narwhals._polars.series (top-level), narwhals._dask.dataframe (delayed, conditional), narwhals._duckdb.dataframe (delayed, conditional), narwhals._arrow.series (delayed, conditional), narwhals.series (conditional), narwhals.dataframe (conditional), narwhals._compliant.dataframe (conditional), narwhals._namespace (conditional), narwhals._ibis.dataframe (delayed, conditional), narwhals._spark_like.dataframe (delayed, conditional), streamlit.dataframe_util (delayed, conditional), streamlit.runtime.caching.hashing (delayed, conditional)
|
||||
missing module named xarray - imported by plotly.express._imshow (optional), streamlit.dataframe_util (delayed, conditional)
|
||||
missing module named 'authlib.jose' - imported by streamlit.auth_util (delayed, optional)
|
||||
missing module named sniffio - imported by tenacity.asyncio (delayed, conditional)
|
||||
missing module named trio - imported by tenacity.asyncio (delayed, conditional)
|
||||
missing module named 'sqlalchemy.exc' - imported by streamlit.connections.sql_connection (delayed)
|
||||
missing module named 'sqlalchemy.orm' - imported by streamlit.connections.sql_connection (delayed, conditional)
|
||||
missing module named snowflake - imported by streamlit.connections.util (delayed, optional)
|
||||
missing module named 'snowflake.snowpark' - imported by streamlit.connections.snowflake_connection (delayed, conditional), streamlit.connections.snowpark_connection (delayed, conditional)
|
||||
missing module named 'snowflake.connector' - imported by streamlit.connections.snowflake_connection (delayed, conditional)
|
||||
missing module named 'pyarrow._stubs_typing' - imported by narwhals._arrow.typing (conditional)
|
||||
missing module named 'pyarrow.__lib_pxi' - imported by narwhals._arrow.typing (conditional)
|
||||
missing module named dask_expr - imported by narwhals._dask.utils (conditional, optional), narwhals._dask.group_by (conditional, optional)
|
||||
missing module named 'polars.lazyframe' - imported by narwhals._polars.group_by (conditional)
|
||||
missing module named 'polars.dataframe' - imported by narwhals._polars.group_by (conditional)
|
||||
missing module named 'duckdb.typing' - imported by narwhals._duckdb.utils (conditional), narwhals._duckdb.expr (top-level), narwhals._duckdb.namespace (top-level), narwhals._duckdb.dataframe (conditional)
|
||||
missing module named 'sqlframe._version' - imported by narwhals.utils (delayed, conditional)
|
||||
missing module named ibis - imported by narwhals.dependencies (conditional), narwhals.utils (delayed, conditional), narwhals._ibis.namespace (top-level), narwhals._ibis.dataframe (top-level), narwhals._ibis.utils (top-level), narwhals._ibis.expr (top-level)
|
||||
missing module named sqlframe - imported by narwhals.utils (delayed, conditional)
|
||||
missing module named duckdb - imported by narwhals.dependencies (conditional), narwhals._arrow.dataframe (delayed, conditional), narwhals._duckdb.dataframe (top-level), narwhals._duckdb.utils (top-level), narwhals._duckdb.expr (top-level), narwhals._duckdb.expr_dt (top-level), narwhals._duckdb.expr_list (top-level), narwhals._duckdb.expr_str (top-level), narwhals._duckdb.expr_struct (top-level), narwhals._duckdb.namespace (top-level), narwhals._duckdb.selectors (conditional), narwhals._duckdb.group_by (conditional), narwhals._duckdb.series (conditional), narwhals._polars.dataframe (delayed, conditional), narwhals._pandas_like.dataframe (delayed, conditional), narwhals.utils (delayed, conditional), narwhals._namespace (conditional)
|
||||
missing module named 'dask.dataframe' - imported by narwhals.dependencies (conditional), narwhals._dask.namespace (top-level), narwhals._polars.dataframe (delayed, conditional), narwhals._dask.dataframe (top-level), narwhals._dask.utils (conditional, optional), narwhals._dask.expr_dt (conditional), narwhals._dask.expr_str (top-level), narwhals._dask.expr (conditional), narwhals._dask.group_by (top-level), narwhals._pandas_like.dataframe (delayed, conditional), narwhals._arrow.dataframe (delayed, conditional), narwhals._dask.selectors (conditional), narwhals.utils (delayed, conditional)
|
||||
missing module named 'pyspark.sql' - imported by narwhals.dependencies (delayed, conditional, optional), narwhals.utils (delayed, conditional), narwhals._namespace (conditional), narwhals._spark_like.utils (delayed, conditional)
|
||||
missing module named cudf - imported by narwhals.dependencies (conditional), narwhals.utils (delayed, conditional)
|
||||
missing module named 'modin.pandas' - imported by narwhals._pandas_like.dataframe (delayed, conditional), narwhals.utils (delayed, conditional)
|
||||
missing module named 'sqlframe.base' - imported by narwhals._spark_like.utils (delayed, conditional), narwhals._spark_like.expr_dt (conditional), narwhals._spark_like.expr_str (conditional), narwhals._spark_like.expr_struct (conditional), narwhals._spark_like.expr (delayed, conditional), narwhals._spark_like.selectors (conditional), narwhals._spark_like.namespace (conditional), narwhals._spark_like.dataframe (delayed, conditional), narwhals._spark_like.group_by (conditional), narwhals.dependencies (delayed, conditional)
|
||||
missing module named 'ibis.selectors' - imported by narwhals._ibis.dataframe (delayed)
|
||||
missing module named 'ibis.expr' - imported by narwhals._ibis.namespace (top-level), narwhals._ibis.dataframe (top-level), narwhals._ibis.utils (top-level), narwhals._ibis.expr_dt (conditional), narwhals._ibis.expr_str (top-level), narwhals._ibis.expr_struct (conditional), narwhals._ibis.expr (conditional), narwhals._ibis.group_by (conditional), narwhals._ibis.selectors (conditional)
|
||||
missing module named pyspark - imported by narwhals.dependencies (conditional)
|
||||
missing module named modin - imported by narwhals.dependencies (conditional)
|
||||
missing module named 'vegafusion.runtime' - imported by altair.utils._vegafusion_data (conditional)
|
||||
missing module named altair.vegalite.SCHEMA_VERSION - imported by altair.vegalite (delayed), altair.utils._importers (delayed)
|
||||
missing module named vl_convert - imported by altair.utils._importers (delayed, optional)
|
||||
missing module named vegafusion - imported by altair.utils._importers (delayed, optional)
|
||||
missing module named altair.vegalite.v5.SCHEMA_VERSION - imported by altair.vegalite.v5 (delayed), altair.vegalite.v5.compiler (delayed)
|
||||
missing module named anywidget - imported by plotly.basewidget (top-level), altair.jupyter (optional), altair.jupyter.jupyter_chart (top-level)
|
||||
missing module named altair.VConcatSpecGenericSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.VConcatChart - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.UnitSpecWithFrame - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.UnitSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelVConcatSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelUnitSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelLayerSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelHConcatSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelFacetSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.TopLevelConcatSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.NonNormalizedSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.LayerSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.LayerChart - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.HConcatSpecGenericSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.HConcatChart - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.FacetSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.FacetedUnitSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.FacetChart - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.ConcatSpecGenericSpec - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.ConcatChart - imported by altair (top-level), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.Chart - imported by altair (delayed), altair.vegalite.v5.display (delayed), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.renderers - imported by altair (delayed), altair.utils.mimebundle (delayed)
|
||||
missing module named altair.vegalite_compilers - imported by altair (delayed), altair.utils._vegafusion_data (delayed)
|
||||
missing module named altair.data_transformers - imported by altair (delayed), altair.utils._vegafusion_data (delayed), altair.utils._transformed_data (top-level)
|
||||
missing module named altair.SchemaBase - imported by altair (conditional), altair.vegalite.v5.schema.channels (conditional)
|
||||
missing module named altair.Parameter - imported by altair (conditional), altair.vegalite.v5.schema.core (conditional), altair.vegalite.v5.schema.channels (conditional), altair.vegalite.v5.schema.mixins (conditional)
|
||||
missing module named skimage.measure.block_reduce - imported by skimage.measure (top-level), skimage.transform._warps (top-level)
|
||||
missing module named skimage.measure.label - imported by skimage.measure (top-level), skimage.restoration.inpaint (top-level)
|
||||
missing module named skimage.exposure.histogram - imported by skimage.exposure (top-level), skimage.filters.thresholding (top-level)
|
||||
missing module named skimage.exposure.is_low_contrast - imported by skimage.exposure (top-level), skimage.io._io (top-level), skimage.io._plugins.matplotlib_plugin (top-level)
|
||||
missing module named skimage.color.rgba2rgb - imported by skimage.color (delayed, conditional), skimage.exposure.exposure (delayed, conditional)
|
||||
missing module named skimage.color.rgb2gray - imported by skimage.color (top-level), skimage.measure._blur_effect (top-level), skimage.exposure.exposure (delayed, conditional)
|
||||
missing module named skimage.color.gray2rgb - imported by skimage.color (top-level), skimage.feature._daisy (top-level), skimage.feature.haar (top-level), skimage.feature.texture (top-level)
|
||||
missing module named skimage.transform.integral_image - imported by skimage.transform (top-level), skimage.feature.corner (top-level), skimage.filters.thresholding (top-level), skimage.feature.blob (top-level), skimage.feature.censure (top-level)
|
||||
missing module named skimage.transform.rescale - imported by skimage.transform (top-level), skimage.feature.sift (top-level)
|
||||
missing module named skimage.transform.pyramid_gaussian - imported by skimage.transform (top-level), skimage.feature.orb (top-level)
|
||||
missing module named skimage.draw.rectangle - imported by skimage.draw (top-level), skimage.feature.haar (top-level)
|
||||
missing module named skimage.transform.warp - imported by skimage.transform (top-level), skimage.filters._window (top-level)
|
||||
missing module named pooch - imported by skimage.data._fetchers (delayed, optional)
|
||||
missing module named 'zarr.core' - imported by tifffile.zarr (delayed, conditional, optional)
|
||||
missing module named 'zarr.abc' - imported by tifffile.zarr (optional)
|
||||
missing module named zarr - imported by tifffile.zarr (top-level)
|
||||
missing module named _imagecodecs - imported by tifffile.tifffile (delayed, conditional, optional)
|
||||
missing module named imagecodecs - imported by tifffile.tifffile (optional), imageio.plugins._tifffile (delayed, conditional, optional)
|
||||
missing module named compression - imported by tifffile._imagecodecs (delayed, optional)
|
||||
missing module named SimpleITK - imported by skimage.io._plugins.simpleitk_plugin (optional), imageio.plugins.simpleitk (delayed, optional)
|
||||
missing module named imread - imported by skimage.io._plugins.imread_plugin (optional)
|
||||
missing module named itk - imported by imageio.plugins.simpleitk (delayed, optional)
|
||||
missing module named rawpy - imported by imageio.plugins.rawpy (top-level)
|
||||
missing module named pillow_heif - imported by imageio.plugins.pillow (delayed, optional)
|
||||
missing module named 'osgeo.gdal' - imported by imageio.plugins.gdal (delayed, optional)
|
||||
missing module named 'astropy.io' - imported by imageio.plugins.fits (delayed, optional)
|
||||
missing module named imageio_ffmpeg - imported by imageio.plugins.ffmpeg (top-level)
|
||||
missing module named tkFileDialog - imported by imageio.plugins._tifffile (delayed, optional)
|
||||
missing module named Tkinter - imported by imageio.plugins._tifffile (delayed, optional)
|
||||
missing module named tifffile_geodb - imported by imageio.plugins._tifffile (delayed, optional)
|
||||
missing module named imageio.plugins.tifffile_geodb - imported by imageio.plugins._tifffile (delayed, optional)
|
||||
missing module named zstd - imported by imageio.plugins._tifffile (delayed, conditional, optional)
|
||||
missing module named 'backports.lzma' - imported by imageio.plugins._tifffile (delayed, conditional, optional)
|
||||
missing module named bsdf_cli - imported by imageio.plugins._bsdf (conditional)
|
||||
missing module named osgeo - imported by skimage.io._plugins.gdal_plugin (optional)
|
||||
missing module named astropy - imported by skimage.io._plugins.fits_plugin (optional)
|
||||
missing module named skimage.metrics.mean_squared_error - imported by skimage.metrics (top-level), skimage.restoration.j_invariant (top-level)
|
||||
missing module named pywt - imported by skimage.restoration._denoise (delayed, optional)
|
||||
missing module named skimage.filters.sobel - imported by skimage.filters (delayed), skimage.measure._blur_effect (delayed)
|
||||
missing module named BaseHTTPServer - imported by plotly.io._base_renderers (optional)
|
||||
missing module named 'statsmodels.api' - imported by plotly.express.trendline_functions (delayed)
|
||||
missing module named statsmodels - imported by plotly.express.trendline_functions (delayed)
|
||||
missing module named plotly.colors.sequential - imported by plotly.colors (top-level), plotly.express._core (top-level)
|
||||
missing module named plotly.colors.qualitative - imported by plotly.colors (top-level), plotly.express._core (top-level)
|
||||
missing module named plotly.colors.validate_scale_values - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.validate_colorscale - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.validate_colors_dict - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.validate_colors - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.unlabel_rgb - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.unconvert_from_RGB_255 - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.n_colors - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.label_rgb - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.hex_to_rgb - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.find_intermediate_color - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.convert_to_RGB_255 - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.colorscale_to_scale - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.colorscale_to_colors - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.color_parser - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.PLOTLY_SCALES - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named plotly.colors.DEFAULT_PLOTLY_COLORS - imported by plotly.colors (top-level), plotly.figure_factory.utils (top-level)
|
||||
missing module named 'plotly.version' - imported by plotly (conditional)
|
||||
missing module named choreographer - imported by plotly.io._kaleido (delayed, conditional)
|
||||
missing module named 'kaleido.errors' - imported by plotly.io._kaleido (delayed, conditional)
|
||||
missing module named 'kaleido.scopes' - imported by plotly.io._kaleido (conditional, optional)
|
||||
missing module named kaleido - imported by plotly.io._kaleido (delayed, conditional, optional)
|
||||
missing module named graphviz - imported by streamlit.type_util (conditional), streamlit.elements.graphviz_chart (conditional)
|
||||
missing module named 'bokeh.embed' - imported by streamlit.elements.bokeh_chart (delayed)
|
||||
missing module named bokeh - imported by streamlit.elements.bokeh_chart (delayed, conditional)
|
||||
missing module named ui - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
|
||||
missing module named splash - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
|
||||
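Most of the entries above are optional or conditional imports inside third-party packages and can be ignored, but the last two ("ui" and "splash") are this application's own modules, so they are the ones worth acting on before the next build. A minimal, hedged pre-build check (run from the directory that contains main.py; everything here is illustrative, not the project's actual tooling):

import importlib
import sys

# Module names taken from the two project-local warnings above.
for name in ("ui", "splash"):
    try:
        importlib.import_module(name)
        print(f"OK: {name} imports cleanly")
    except ImportError as exc:
        # A failure here usually means a missing __init__.py or the wrong working directory.
        print(f"FAIL: {name} -> {exc}")
        sys.exit(1)

If both imports succeed but the warnings persist, declaring the modules as hidden imports in the spec file is the usual next step.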
BIN
qt_app_pyside1/build/FixedDebug/xref-FixedDebug.html
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/Analysis-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/EXE-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/PKG-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/PYZ-00.pyz
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/PYZ-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/QuickDebug.pkg
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/base_library.zip
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod01_archive.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod02_importers.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod03_ctypes.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/localpycs/pyimod04_pywin32.pyc
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/QuickDebug/localpycs/struct.pyc
Normal file
Binary file not shown.
28
qt_app_pyside1/build/QuickDebug/warn-QuickDebug.txt
Normal file
@@ -0,0 +1,28 @@
This file lists modules PyInstaller was not able to find. This does not
necessarily mean this module is required for running your program. Python and
Python 3rd-party packages include a lot of conditional or optional modules. For
example the module 'ntpath' only exists on Windows, whereas the module
'posixpath' only exists on Posix systems.

Types of import:
* top-level: imported at the top-level - look at these first
* conditional: imported within an if-statement
* delayed: imported within a function
* optional: imported within a try-except-statement

IMPORTANT: Do NOT post this list to the issue-tracker. Use it as a basis for
tracking down the missing module yourself. Thanks!
missing module named 'org.python' - imported by copy (optional)
missing module named org - imported by pickle (optional)
missing module named pwd - imported by posixpath (delayed, conditional, optional), shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional)
missing module named grp - imported by shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional)
missing module named posix - imported by os (conditional, optional), posixpath (optional), shutil (conditional), importlib._bootstrap_external (conditional)
missing module named resource - imported by posix (top-level)
missing module named _frozen_importlib_external - imported by importlib._bootstrap (delayed), importlib (optional), importlib.abc (optional)
excluded module named _frozen_importlib - imported by importlib (optional), importlib.abc (optional)
missing module named ui - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
missing module named splash - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
missing module named _posixsubprocess - imported by subprocess (conditional)
missing module named fcntl - imported by subprocess (optional)
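The header above suggests looking at the top-level entries first. A small, hedged helper for triaging a warn file along those lines (the path is illustrative; point it at whichever warn-*.txt was produced by the build being debugged):

from pathlib import Path

# Print only the "missing module" entries that include a top-level importer,
# since those are the most likely to break the frozen application.
warn_file = Path("build/QuickDebug/warn-QuickDebug.txt")
for line in warn_file.read_text(encoding="utf-8").splitlines():
    if line.startswith("missing module named") and "top-level" in line:
        print(line)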
BIN
qt_app_pyside1/build/QuickDebug/xref-QuickDebug.html
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/Analysis-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/EXE-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/PKG-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/PYZ-00.pyz
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/PYZ-00.toc
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/TrafficMonitor.pkg
LFS
Normal file
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/base_library.zip
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
qt_app_pyside1/build/TrafficMonitor/localpycs/struct.pyc
Normal file
Binary file not shown.
773
qt_app_pyside1/build/TrafficMonitor/warn-TrafficMonitor.txt
Normal file
@@ -0,0 +1,773 @@
This file lists modules PyInstaller was not able to find. This does not
necessarily mean this module is required for running your program. Python and
Python 3rd-party packages include a lot of conditional or optional modules. For
example the module 'ntpath' only exists on Windows, whereas the module
'posixpath' only exists on Posix systems.

Types of import:
* top-level: imported at the top-level - look at these first
* conditional: imported within an if-statement
* delayed: imported within a function
* optional: imported within a try-except-statement

IMPORTANT: Do NOT post this list to the issue-tracker. Use it as a basis for
tracking down the missing module yourself. Thanks!
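This warn file runs to 773 entries, most of them optional sub-imports of the heavy ML stacks pulled into this build (torch, tensorflow, keras, sklearn, plotly, streamlit and friends). When a package genuinely is not used by the frozen application, it can be dropped from the analysis via the spec file's excludes list; the fragment below is a hypothetical sketch in the style of the repo's existing .spec files, with example names only - anything the app imports at runtime must not be excluded.

# Hypothetical Analysis() fragment for a PyInstaller .spec file; the exclude
# names are illustrative and must be verified against the application's real imports.
a = Analysis(
    ['main.py'],
    pathex=[],
    excludes=['tkinter', 'IPython', 'matplotlib'],
)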
missing module named usercustomize - imported by site (delayed, optional)
|
||||
missing module named sitecustomize - imported by site (delayed, optional)
|
||||
missing module named 'org.python' - imported by copy (optional), xml.sax (delayed, conditional), setuptools.sandbox (conditional)
|
||||
missing module named org - imported by pickle (optional)
|
||||
missing module named pwd - imported by posixpath (delayed, conditional, optional), shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional), http.server (delayed, optional), webbrowser (delayed), psutil (optional), netrc (delayed, conditional), getpass (delayed), distutils.util (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), distutils.archive_util (optional), setuptools._distutils.util (delayed, conditional, optional), setuptools._distutils.archive_util (optional)
|
||||
missing module named grp - imported by shutil (delayed, optional), tarfile (optional), pathlib (delayed, optional), subprocess (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), distutils.archive_util (optional), setuptools._distutils.archive_util (optional)
|
||||
missing module named posix - imported by os (conditional, optional), posixpath (optional), shutil (conditional), importlib._bootstrap_external (conditional)
|
||||
missing module named resource - imported by posix (top-level), fsspec.asyn (conditional, optional), torch._inductor.codecache (delayed, conditional)
|
||||
missing module named _frozen_importlib_external - imported by importlib._bootstrap (delayed), importlib (optional), importlib.abc (optional), zipimport (top-level)
|
||||
excluded module named _frozen_importlib - imported by importlib (optional), importlib.abc (optional), zipimport (top-level)
|
||||
missing module named _posixsubprocess - imported by subprocess (conditional), multiprocessing.util (delayed), joblib.externals.loky.backend.fork_exec (delayed)
|
||||
missing module named fcntl - imported by subprocess (optional), xmlrpc.server (optional), tqdm.utils (delayed, optional), filelock._unix (conditional, optional), absl.flags._helpers (optional), pty (delayed, optional), torch.testing._internal.distributed.distributed_test (conditional)
|
||||
missing module named win32evtlog - imported by logging.handlers (delayed, optional)
|
||||
missing module named win32evtlogutil - imported by logging.handlers (delayed, optional)
|
||||
missing module named startup - imported by pyreadline3.keysyms.common (conditional), pyreadline3.keysyms.keysyms (conditional)
|
||||
missing module named sets - imported by pyreadline3.keysyms.common (optional), pytz.tzinfo (optional)
|
||||
missing module named System - imported by pyreadline3.clipboard.ironpython_clipboard (top-level), pyreadline3.keysyms.ironpython_keysyms (top-level), pyreadline3.console.ironpython_console (top-level), pyreadline3.rlmain (conditional)
|
||||
missing module named console - imported by pyreadline3.console.ansi (conditional)
|
||||
missing module named clr - imported by pyreadline3.clipboard.ironpython_clipboard (top-level), pyreadline3.console.ironpython_console (top-level)
|
||||
missing module named IronPythonConsole - imported by pyreadline3.console.ironpython_console (top-level)
|
||||
missing module named vms_lib - imported by platform (delayed, optional)
|
||||
missing module named 'java.lang' - imported by platform (delayed, optional), xml.sax._exceptions (conditional)
|
||||
missing module named java - imported by platform (delayed)
|
||||
missing module named _winreg - imported by platform (delayed, optional), pygments.formatters.img (optional)
|
||||
missing module named termios - imported by tty (top-level), getpass (optional), tqdm.utils (delayed, optional), absl.flags._helpers (optional)
|
||||
missing module named pyimod02_importers - imported by C:\Users\jatin\.conda\envs\traffic_monitor\Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgutil.py (delayed), C:\Users\jatin\.conda\envs\traffic_monitor\Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgres.py (delayed)
|
||||
missing module named _manylinux - imported by packaging._manylinux (delayed, optional), setuptools._vendor.packaging._manylinux (delayed, optional), wheel.vendored.packaging._manylinux (delayed, optional)
|
||||
missing module named '_typeshed.importlib' - imported by pkg_resources (conditional)
|
||||
missing module named _typeshed - imported by pkg_resources (conditional), setuptools.glob (conditional), setuptools.compat.py311 (conditional), torch.utils._backport_slots (conditional), setuptools._distutils.dist (conditional)
|
||||
missing module named jnius - imported by setuptools._vendor.platformdirs.android (delayed, conditional, optional)
|
||||
missing module named android - imported by setuptools._vendor.platformdirs.android (delayed, conditional, optional)
|
||||
missing module named _posixshmem - imported by multiprocessing.resource_tracker (conditional), multiprocessing.shared_memory (conditional)
|
||||
missing module named multiprocessing.set_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
missing module named multiprocessing.get_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
missing module named multiprocessing.get_context - imported by multiprocessing (top-level), multiprocessing.pool (top-level), multiprocessing.managers (top-level), multiprocessing.sharedctypes (top-level), joblib.externals.loky.backend.context (top-level)
missing module named multiprocessing.TimeoutError - imported by multiprocessing (top-level), multiprocessing.pool (top-level), joblib.parallel (top-level)
missing module named _scproxy - imported by urllib.request (conditional)
missing module named multiprocessing.BufferTooShort - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
missing module named multiprocessing.AuthenticationError - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
missing module named multiprocessing.RLock - imported by multiprocessing (delayed, conditional, optional), tqdm.std (delayed, conditional, optional)
missing module named multiprocessing.Pool - imported by multiprocessing (delayed, conditional), scipy._lib._util (delayed, conditional), torchvision.datasets.kinetics (top-level)
missing module named asyncio.DefaultEventLoopPolicy - imported by asyncio (delayed, conditional), asyncio.events (delayed, conditional)
missing module named 'distutils._modified' - imported by setuptools._distutils.file_util (delayed)
missing module named 'distutils._log' - imported by setuptools._distutils.command.bdist_dumb (top-level), setuptools._distutils.command.bdist_rpm (top-level), setuptools._distutils.command.build_clib (top-level), setuptools._distutils.command.build_ext (top-level), setuptools._distutils.command.build_py (top-level), setuptools._distutils.command.build_scripts (top-level), setuptools._distutils.command.clean (top-level), setuptools._distutils.command.config (top-level), setuptools._distutils.command.install (top-level), setuptools._distutils.command.install_scripts (top-level), setuptools._distutils.command.sdist (top-level)
missing module named trove_classifiers - imported by setuptools.config._validate_pyproject.formats (optional)
missing module named importlib_resources - imported by setuptools._vendor.jaraco.text (optional), tqdm.cli (delayed, conditional, optional), jsonschema_specifications._core (optional)
missing module named numpy.arccosh - imported by numpy (top-level), scipy.signal._filter_design (top-level)
missing module named numpy.arcsinh - imported by numpy (top-level), scipy.signal._filter_design (top-level)
missing module named numpy.arctan - imported by numpy (top-level), scipy.signal._spline_filters (top-level)
missing module named numpy.tan - imported by numpy (top-level), scipy.signal._spline_filters (top-level), scipy.signal._filter_design (top-level)
missing module named numpy.greater - imported by numpy (top-level), scipy.optimize._minpack_py (top-level), scipy.signal._spline_filters (top-level)
missing module named numpy.power - imported by numpy (top-level), scipy.stats._kde (top-level)
missing module named numpy.sinh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
missing module named numpy.cosh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
missing module named numpy.tanh - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
missing module named numpy.expm1 - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
missing module named numpy.log1p - imported by numpy (top-level), scipy.stats._discrete_distns (top-level)
missing module named numpy.ceil - imported by numpy (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._filter_design (top-level)
missing module named numpy.log - imported by numpy (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._discrete_distns (top-level), scipy.stats._morestats (top-level), scipy.signal._waveforms (top-level)
missing module named numpy.logical_and - imported by numpy (top-level), scipy.stats._distn_infrastructure (top-level)
missing module named numpy.sign - imported by numpy (top-level), scipy.linalg._matfuncs (top-level)
missing module named numpy.conjugate - imported by numpy (top-level), scipy.linalg._matfuncs (top-level), scipy.signal._filter_design (top-level)
missing module named numpy.logical_not - imported by numpy (top-level), scipy.linalg._matfuncs (top-level)
missing module named numpy.single - imported by numpy (top-level), scipy.linalg._decomp_schur (top-level)
missing module named numpy.floor - imported by numpy (top-level), scipy.special._basic (top-level), scipy.special._orthogonal (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._discrete_distns (top-level), scipy.signal._spline_filters (top-level)
missing module named numpy.arcsin - imported by numpy (top-level), scipy.linalg._decomp_svd (top-level)
missing module named numpy.arccos - imported by numpy (top-level), scipy.linalg._decomp_svd (top-level), scipy.special._orthogonal (top-level)
missing module named numpy.complex128 - imported by numpy (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.complex64 - imported by numpy (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy.signal._spline_filters (top-level)
missing module named numpy.conj - imported by numpy (top-level), scipy.linalg._decomp (top-level), scipy.io._mmio (top-level)
missing module named numpy.inexact - imported by numpy (top-level), scipy.linalg._decomp (top-level), scipy.special._basic (top-level), scipy.optimize._minpack_py (top-level)
missing module named _dummy_thread - imported by numpy.core.arrayprint (optional), cffi.lock (conditional, optional), torch._jit_internal (optional)
missing module named numpy.core.result_type - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.core.float_ - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.core.number - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.core.object_ - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
missing module named numpy.core.max - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.core.all - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
missing module named numpy.core.errstate - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
missing module named numpy.core.bool_ - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.core.inf - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.core.isnan - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (delayed)
missing module named numpy.core.array2string - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.lib.imag - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.lib.real - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.lib.iscomplexobj - imported by numpy.lib (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.core.signbit - imported by numpy.core (delayed), numpy.testing._private.utils (delayed)
missing module named numpy.core.isscalar - imported by numpy.core (delayed), numpy.testing._private.utils (delayed), numpy.lib.polynomial (top-level)
missing module named win32pdh - imported by numpy.testing._private.utils (delayed, conditional)
missing module named numpy.core.array - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level), numpy.lib.polynomial (top-level)
missing module named numpy.core.isnat - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
missing module named numpy.core.ndarray - imported by numpy.core (top-level), numpy.testing._private.utils (top-level), numpy.lib.utils (top-level)
missing module named numpy.core.array_repr - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
missing module named numpy.core.arange - imported by numpy.core (top-level), numpy.testing._private.utils (top-level), numpy.fft.helper (top-level)
missing module named numpy.core.empty - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level), numpy.fft.helper (top-level)
missing module named numpy.core.float32 - imported by numpy.core (top-level), numpy.testing._private.utils (top-level)
missing module named numpy.core.intp - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.testing._private.utils (top-level)
missing module named numpy.core.linspace - imported by numpy.core (top-level), numpy.lib.index_tricks (top-level)
missing module named numpy.core.iinfo - imported by numpy.core (top-level), numpy.lib.twodim_base (top-level)
missing module named numpy.core.transpose - imported by numpy.core (top-level), numpy.lib.function_base (top-level)
missing module named numpy._typing._ufunc - imported by numpy._typing (conditional)
missing module named numpy.uint - imported by numpy (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level)
missing module named numpy.core.asarray - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.utils (top-level), numpy.fft._pocketfft (top-level), numpy.fft.helper (top-level)
missing module named numpy.core.integer - imported by numpy.core (top-level), numpy.fft.helper (top-level)
missing module named numpy.core.sqrt - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
missing module named numpy.core.conjugate - imported by numpy.core (top-level), numpy.fft._pocketfft (top-level)
missing module named numpy.core.swapaxes - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
missing module named numpy.core.zeros - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.fft._pocketfft (top-level)
missing module named numpy.core.reciprocal - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.sort - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.argsort - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.sign - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.count_nonzero - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.divide - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.matmul - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.asanyarray - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.atleast_2d - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.prod - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.amax - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.amin - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.moveaxis - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.geterrobj - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.finfo - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.polynomial (top-level)
missing module named numpy.core.isfinite - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.sum - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.multiply - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.add - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.dot - imported by numpy.core (top-level), numpy.linalg.linalg (top-level), numpy.lib.polynomial (top-level)
missing module named numpy.core.Inf - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.newaxis - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.complexfloating - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.inexact - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.cdouble - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.csingle - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.double - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.single - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.intc - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named numpy.core.empty_like - imported by numpy.core (top-level), numpy.linalg.linalg (top-level)
missing module named pyodide_js - imported by threadpoolctl (delayed, optional)
missing module named numpy.core.ufunc - imported by numpy.core (top-level), numpy.lib.utils (top-level)
missing module named numpy.core.ones - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
missing module named numpy.core.hstack - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
missing module named numpy.core.atleast_1d - imported by numpy.core (top-level), numpy.lib.polynomial (top-level)
missing module named numpy.core.atleast_3d - imported by numpy.core (top-level), numpy.lib.shape_base (top-level)
missing module named numpy.core.vstack - imported by numpy.core (top-level), numpy.lib.shape_base (top-level)
missing module named pickle5 - imported by numpy.compat.py3k (optional)
missing module named numpy.eye - imported by numpy (delayed), numpy.core.numeric (delayed), scipy.optimize._optimize (top-level), scipy.linalg._decomp (top-level), scipy.interpolate._pade (top-level), scipy.signal._lti_conversion (top-level)
missing module named numpy.recarray - imported by numpy (top-level), numpy.lib.recfunctions (top-level), numpy.ma.mrecords (top-level)
missing module named numpy.expand_dims - imported by numpy (top-level), numpy.ma.core (top-level)
missing module named numpy.array - imported by numpy (top-level), numpy.ma.core (top-level), numpy.ma.extras (top-level), numpy.ma.mrecords (top-level), scipy.linalg._decomp (top-level), scipy.sparse.linalg._isolve.utils (top-level), scipy.linalg._decomp_schur (top-level), scipy.stats._stats_py (top-level), scipy.interpolate._interpolate (top-level), scipy.interpolate._fitpack_impl (top-level), scipy.interpolate._fitpack2 (top-level), scipy.integrate._ode (top-level), scipy._lib._finite_differences (top-level), scipy.stats._morestats (top-level), scipy.optimize._lbfgsb_py (top-level), scipy.optimize._tnc (top-level), scipy.optimize._slsqp_py (top-level), dill._objects (optional), scipy.io._netcdf (top-level), scipy.signal._spline_filters (top-level), scipy.signal._filter_design (top-level), scipy.signal._lti_conversion (top-level)
missing module named numpy.iscomplexobj - imported by numpy (top-level), numpy.ma.core (top-level), scipy.linalg._decomp (top-level), scipy.linalg._decomp_ldl (top-level)
missing module named numpy.amin - imported by numpy (top-level), numpy.ma.core (top-level), scipy.stats._morestats (top-level)
missing module named numpy.amax - imported by numpy (top-level), numpy.ma.core (top-level), scipy.linalg._matfuncs (top-level), scipy.stats._morestats (top-level)
missing module named numpy.isinf - imported by numpy (top-level), numpy.testing._private.utils (top-level), scipy.stats._distn_infrastructure (top-level)
missing module named numpy.isnan - imported by numpy (top-level), numpy.testing._private.utils (top-level)
missing module named numpy.isfinite - imported by numpy (top-level), numpy.testing._private.utils (top-level), scipy.linalg._decomp (top-level), scipy.linalg._matfuncs (top-level), scipy.optimize._slsqp_py (top-level)
missing module named numpy.float64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), scipy.stats._mstats_extras (top-level), scipy.optimize._lbfgsb_py (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.float32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), scipy.signal._spline_filters (top-level)
missing module named numpy.uint64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.uint32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.uint16 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.uint8 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.int64 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.int32 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), dill._objects (optional), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.int16 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.int8 - imported by numpy (top-level), numpy.array_api._typing (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.bytes_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.str_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.void - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.object_ - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.datetime64 - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.timedelta64 - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.number - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.complexfloating - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.floating - imported by numpy (top-level), numpy._typing._array_like (top-level), torch._dynamo.variables.misc (optional)
missing module named numpy.integer - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ctypeslib (top-level)
missing module named numpy.unsignedinteger - imported by numpy (top-level), numpy._typing._array_like (top-level)
missing module named numpy.bool_ - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ma.core (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random._generator (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named numpy.generic - imported by numpy (top-level), numpy._typing._array_like (top-level), torch._dynamo.variables.misc (optional)
missing module named numpy.dtype - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.array_api._typing (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), numpy.ctypeslib (top-level), scipy._lib.array_api_compat.numpy._info (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy._lib.array_api_compat.dask.array._info (top-level), scipy.optimize._minpack_py (top-level), dill._dill (delayed), scipy.io._netcdf (top-level), torch._dynamo.variables.misc (optional), sklearn.externals.array_api_compat.numpy._info (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level), sklearn.externals.array_api_compat.dask.array._info (top-level)
missing module named numpy.ndarray - imported by numpy (top-level), numpy._typing._array_like (top-level), numpy.ma.core (top-level), numpy.ma.extras (top-level), numpy.lib.recfunctions (top-level), numpy.ma.mrecords (top-level), numpy.random.mtrand (top-level), numpy.random.bit_generator (top-level), numpy.random._philox (top-level), numpy.random._sfc64 (top-level), numpy.random._generator (top-level), numpy.random._mt19937 (top-level), numpy.ctypeslib (top-level), scipy._lib.array_api_compat.numpy._typing (top-level), scipy.stats._distn_infrastructure (top-level), scipy.stats._mstats_basic (top-level), scipy.stats._mstats_extras (top-level), pandas.compat.numpy.function (top-level), dill._dill (delayed), scipy.io._mmio (top-level), sklearn.externals.array_api_compat.numpy._typing (top-level)
missing module named numpy.ufunc - imported by numpy (top-level), numpy._typing (top-level), numpy.testing.overrides (top-level), dill._dill (delayed), dill._objects (optional)
missing module named numpy.histogramdd - imported by numpy (delayed), numpy.lib.twodim_base (delayed)
missing module named numpy._distributor_init_local - imported by numpy (optional), numpy._distributor_init (optional)
missing module named openvino_tokenizers - imported by openvino.tools.ovc.utils (delayed, optional)
missing module named StringIO - imported by six (conditional)
missing module named six.moves.zip - imported by six.moves (top-level), pasta.base.annotate (top-level)
runtime module named six.moves - imported by dateutil.tz.tz (top-level), dateutil.tz._factories (top-level), dateutil.tz.win (top-level), dateutil.rrule (top-level), astunparse (top-level), tensorflow.python.distribute.multi_process_runner (top-level), tensorflow.python.distribute.coordinator.cluster_coordinator (top-level), six.moves.urllib (top-level), pasta.base.annotate (top-level)
missing module named six.moves.cStringIO - imported by six.moves (top-level), astunparse (top-level)
missing module named six.moves.range - imported by six.moves (top-level), dateutil.rrule (top-level)
missing module named rules_python - imported by tensorflow.python.platform.resource_loader (optional)
missing module named google.protobuf.pyext._message - imported by google.protobuf.pyext (conditional, optional), google.protobuf.internal.api_implementation (conditional, optional), google.protobuf.descriptor (conditional), google.protobuf.pyext.cpp_message (conditional)
missing module named google.protobuf.enable_deterministic_proto_serialization - imported by google.protobuf (optional), google.protobuf.internal.api_implementation (optional)
missing module named google.protobuf.internal._api_implementation - imported by google.protobuf.internal (optional), google.protobuf.internal.api_implementation (optional)
missing module named astn - imported by gast.ast2 (top-level)
missing module named theano - imported by opt_einsum.backends.theano (delayed)
missing module named jax - imported by optree.integrations.jax (top-level), scipy._lib.array_api_compat.common._helpers (delayed), scipy._lib._array_api (delayed, conditional), opt_einsum.backends.jax (delayed, conditional), keras.src.trainers.data_adapters.data_adapter_utils (delayed), keras.src.backend.jax.core (top-level), keras.src.backend.jax.distribution_lib (top-level), keras.src.backend.jax.image (top-level), keras.src.backend.jax.linalg (top-level), keras.src.backend.jax.math (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.random (top-level), keras.src.backend.jax.rnn (top-level), keras.src.backend.jax.trainer (top-level), keras.src.backend.numpy.nn (top-level), keras.src.backend.jax.export (delayed), keras.src.backend.jax.optimizer (top-level), keras.src.ops.nn (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed), sklearn.externals.array_api_extra._lib._lazy (delayed, conditional), tensorflow.lite.python.util (optional), openvino.frontend.jax.utils (top-level), openvino.frontend.jax.jaxpr_decoder (top-level), openvino.tools.ovc.convert_impl (delayed, conditional)
missing module named cupy - imported by scipy._lib.array_api_compat.common._helpers (delayed, conditional), scipy._lib.array_api_compat.cupy (top-level), scipy._lib.array_api_compat.cupy._aliases (top-level), scipy._lib.array_api_compat.cupy._info (top-level), scipy._lib.array_api_compat.cupy._typing (top-level), scipy._lib._array_api (delayed, conditional), opt_einsum.backends.cupy (delayed), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), sklearn.externals.array_api_compat.cupy (top-level), sklearn.externals.array_api_compat.cupy._aliases (top-level), sklearn.externals.array_api_compat.cupy._info (top-level), sklearn.externals.array_api_compat.cupy._typing (top-level), sklearn.utils._testing (delayed, conditional), sklearn.externals.array_api_compat.cupy.fft (top-level), sklearn.externals.array_api_compat.cupy.linalg (top-level)
missing module named simplejson - imported by requests.compat (conditional, optional), huggingface_hub.utils._fixes (optional)
missing module named dummy_threading - imported by requests.cookies (optional), joblib.compressor (optional)
missing module named 'h2.events' - imported by urllib3.http2.connection (top-level)
missing module named 'h2.connection' - imported by urllib3.http2.connection (top-level)
missing module named h2 - imported by urllib3.http2.connection (top-level)
missing module named zstandard - imported by urllib3.util.request (optional), urllib3.response (optional), fsspec.compression (optional)
missing module named brotlicffi - imported by urllib3.util.request (optional), urllib3.response (optional), aiohttp.compression_utils (optional)
missing module named collections.Callable - imported by collections (optional), cffi.api (optional), socks (optional)
missing module named bcrypt - imported by cryptography.hazmat.primitives.serialization.ssh (optional)
missing module named cryptography.x509.UnsupportedExtension - imported by cryptography.x509 (optional), urllib3.contrib.pyopenssl (optional)
missing module named chardet - imported by requests (optional), pygments.lexer (delayed, conditional, optional)
missing module named 'pyodide.ffi' - imported by urllib3.contrib.emscripten.fetch (delayed, optional)
missing module named pyodide - imported by urllib3.contrib.emscripten.fetch (top-level)
missing module named js - imported by urllib3.contrib.emscripten.fetch (top-level), fsspec.implementations.http_sync (delayed, optional)
missing module named oauth2client - imported by tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver (optional), tensorflow.python.tpu.client.client (optional)
missing module named googleapiclient - imported by tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver (optional), tensorflow.python.tpu.client.client (optional)
missing module named cloud_tpu_client - imported by tensorflow.python.distribute.cluster_resolver.tpu.tpu_cluster_resolver (optional)
missing module named kubernetes - imported by tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver (delayed, conditional, optional)
missing module named distributed - imported by fsspec.transaction (delayed), joblib._dask (optional), joblib._parallel_backends (delayed, optional)
missing module named 'sphinx.ext' - imported by pyarrow.vendored.docscrape (delayed, conditional)
missing module named dateutil.tz.tzfile - imported by dateutil.tz (top-level), dateutil.zoneinfo (top-level)
missing module named numexpr - imported by pandas.core.computation.expressions (conditional), pandas.core.computation.engines (delayed)
missing module named pandas.core.groupby.PanelGroupBy - imported by pandas.core.groupby (delayed, optional), tqdm.std (delayed, optional)
missing module named numba - imported by pandas.core._numba.executor (delayed, conditional), pandas.core.util.numba_ (delayed, conditional), pandas.core.window.numba_ (delayed, conditional), pandas.core.window.online (delayed, conditional), pandas.core._numba.kernels.mean_ (top-level), pandas.core._numba.kernels.shared (top-level), pandas.core._numba.kernels.sum_ (top-level), pandas.core._numba.kernels.min_max_ (top-level), pandas.core._numba.kernels.var_ (top-level), pandas.core.groupby.numba_ (delayed, conditional), pandas.core._numba.extensions (top-level)
missing module named 'numba.extending' - imported by pandas.core._numba.kernels.sum_ (top-level)
missing module named pandas.core.window._Rolling_and_Expanding - imported by pandas.core.window (delayed, optional), tqdm.std (delayed, optional)
missing module named 'numba.typed' - imported by pandas.core._numba.extensions (delayed)
missing module named 'numba.core' - imported by pandas.core._numba.extensions (top-level)
missing module named pytest - imported by scipy._lib._testutils (delayed), sympy.testing.runtests_pytest (optional), pandas._testing._io (delayed), pandas._testing (delayed), torch.testing._internal.common_utils (delayed, conditional, optional), h5py.tests (delayed, optional), networkx.classes.backends (conditional, optional), torch.testing._internal.optests.generate_tests (delayed, conditional), sklearn.utils._testing (optional), fsspec.conftest (top-level), pyarrow.conftest (top-level), pyarrow.tests.util (top-level), torch._numpy.testing.utils (delayed)
missing module named cupy_backends - imported by scipy._lib.array_api_compat.common._helpers (delayed)
missing module named 'cupy.cuda' - imported by scipy._lib.array_api_compat.cupy._typing (top-level), scipy._lib.array_api_compat.common._helpers (delayed), sklearn.externals.array_api_compat.cupy._typing (top-level), sklearn.externals.array_api_compat.common._helpers (delayed)
missing module named 'jax.experimental' - imported by scipy._lib.array_api_compat.common._helpers (delayed, conditional), keras.src.trainers.data_adapters.data_adapter_utils (delayed), keras.src.testing.test_case (delayed, conditional), keras.src.backend.jax.core (top-level), keras.src.backend.jax.distribution_lib (top-level), keras.src.backend.jax.numpy (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.sparse (top-level), keras.src.backend.jax.export (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed, conditional)
missing module named 'jax.numpy' - imported by scipy._lib.array_api_compat.common._helpers (delayed, conditional), keras.src.backend.jax.core (top-level), keras.src.backend.jax.image (top-level), keras.src.backend.jax.linalg (top-level), keras.src.backend.jax.math (top-level), keras.src.backend.jax.numpy (top-level), keras.src.backend.jax.nn (top-level), keras.src.backend.jax.sparse (top-level), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), openvino.frontend.jax.utils (top-level)
missing module named 'dask.array' - imported by scipy._lib.array_api_compat.dask.array (top-level), scipy._lib.array_api_compat.dask.array._aliases (top-level), scipy._lib.array_api_compat.common._helpers (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed, conditional), sklearn.externals.array_api_compat.dask.array (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level), sklearn.externals.array_api_compat.dask.array.fft (top-level), sklearn.externals.array_api_compat.dask.array.linalg (top-level)
missing module named sparse - imported by scipy._lib.array_api_compat.common._helpers (delayed, conditional), scipy.sparse.linalg._expm_multiply (delayed, conditional), scipy.sparse.linalg._matfuncs (delayed, conditional), sklearn.externals.array_api_compat.common._helpers (delayed, conditional)
missing module named dask - imported by scipy._lib.array_api_compat.common._helpers (delayed), joblib._dask (optional), sklearn.externals.array_api_extra._lib._lazy (delayed, conditional), fsspec.implementations.dask (top-level)
missing module named ndonnx - imported by scipy._lib.array_api_compat.common._helpers (delayed), sklearn.externals.array_api_compat.common._helpers (delayed)
missing module named 'numpy.lib.array_utils' - imported by scipy._lib.array_api_compat.common._linalg (conditional), joblib._memmapping_reducer (delayed, optional), sklearn.externals.array_api_compat.common._linalg (conditional)
missing module named 'numpy.linalg._linalg' - imported by scipy._lib.array_api_compat.numpy.linalg (delayed, optional), sklearn.externals.array_api_compat.numpy.linalg (delayed, optional)
missing module named Cython - imported by scipy._lib._testutils (optional)
missing module named cython - imported by scipy._lib._testutils (optional), av.packet (top-level), av.audio.codeccontext (top-level), av.filter.loudnorm (top-level), pyarrow.conftest (optional)
missing module named sphinx - imported by scipy._lib._docscrape (delayed, conditional)
missing module named cupyx - imported by scipy._lib._array_api (delayed, conditional)
missing module named scipy.sparse.issparse - imported by scipy.sparse (top-level), scipy.sparse.linalg._interface (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.sparse.linalg._norm (top-level), scipy.sparse.csgraph._laplacian (top-level), scipy._lib._array_api (delayed), scipy.integrate._ivp.bdf (top-level), scipy.optimize._numdiff (top-level), scipy.integrate._ivp.radau (top-level), scipy.optimize._constraints (top-level), scipy.optimize._trustregion_constr.projections (top-level), scipy.optimize._lsq.least_squares (top-level), scipy.optimize._lsq.common (top-level), scipy.optimize._lsq.lsq_linear (top-level), scipy.optimize._linprog_highs (top-level), scipy.optimize._differentialevolution (top-level), scipy.optimize._milp (top-level), pandas.core.dtypes.common (delayed, conditional, optional), scipy.io.matlab._mio (delayed, conditional), scipy.io._fast_matrix_market (top-level), scipy.io._mmio (top-level), sklearn.utils._param_validation (top-level), sklearn.externals._scipy.sparse.csgraph._laplacian (top-level), sklearn.utils._set_output (top-level), sklearn.utils.multiclass (top-level), sklearn.metrics.cluster._unsupervised (top-level), sklearn.metrics.pairwise (top-level), sklearn.metrics._pairwise_distances_reduction._dispatcher (top-level), sklearn.cluster._feature_agglomeration (top-level), sklearn.cluster._bicluster (top-level), sklearn.neighbors._base (top-level), sklearn.decomposition._pca (top-level), sklearn.cluster._hdbscan.hdbscan (top-level), sklearn.cluster._optics (top-level), sklearn.manifold._isomap (top-level), sklearn.manifold._t_sne (top-level), sklearn.metrics._classification (top-level), sklearn.metrics._ranking (top-level), sklearn.utils._indexing (top-level), tensorflow.python.keras.engine.data_adapter (delayed, optional), tensorflow.python.keras.engine.training_arrays_v1 (optional), tensorflow.python.keras.engine.training_v1 (optional), sklearn.tree._classes (top-level), scipy.sparse.csgraph._validation (top-level)
missing module named scipy.linalg._fblas_64 - imported by scipy.linalg (optional), scipy.linalg.blas (optional)
missing module named scipy.linalg._cblas - imported by scipy.linalg (optional), scipy.linalg.blas (optional)
missing module named scipy.linalg._flapack_64 - imported by scipy.linalg (optional), scipy.linalg.lapack (optional)
missing module named scipy.linalg._clapack - imported by scipy.linalg (optional), scipy.linalg.lapack (optional)
missing module named scipy.special.inv_boxcox - imported by scipy.special (top-level), sklearn.preprocessing._data (top-level)
missing module named scipy.special.boxcox - imported by scipy.special (top-level), sklearn.preprocessing._data (top-level)
missing module named scipy.special.sph_jn - imported by scipy.special (delayed, conditional, optional), sympy.functions.special.bessel (delayed, conditional, optional)
missing module named scipy.special.gammaincinv - imported by scipy.special (top-level), scipy.stats._qmvnt (top-level)
missing module named scipy.special.ive - imported by scipy.special (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.special.betaln - imported by scipy.special (top-level), scipy.stats._discrete_distns (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.special.beta - imported by scipy.special (top-level), scipy.stats._tukeylambda_stats (top-level)
missing module named scipy.special.loggamma - imported by scipy.special (top-level), scipy.fft._fftlog_backend (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.interpolate.PPoly - imported by scipy.interpolate (top-level), scipy.interpolate._cubic (top-level), scipy.spatial.transform._rotation_spline (delayed), scipy.integrate._bvp (delayed)
missing module named _curses - imported by curses (top-level), curses.has_key (top-level)
missing module named olefile - imported by PIL.FpxImagePlugin (top-level), PIL.MicImagePlugin (top-level)
missing module named xmlrpclib - imported by defusedxml.xmlrpc (conditional)
missing module named railroad - imported by pyparsing.diagram (top-level)
missing module named pyparsing.Word - imported by pyparsing (delayed), pyparsing.unicode (delayed), pydot.dot_parser (top-level)
missing module named gi - imported by matplotlib.cbook (delayed, conditional)
missing module named 'scikits.umfpack' - imported by scipy.optimize._linprog_ip (optional)
missing module named 'sksparse.cholmod' - imported by scipy.optimize._linprog_ip (optional)
missing module named sksparse - imported by scipy.optimize._trustregion_constr.projections (optional), scipy.optimize._linprog_ip (optional)
missing module named scipy.optimize.root_scalar - imported by scipy.optimize (top-level), scipy.stats._continuous_distns (top-level), scipy.stats._stats_py (top-level), scipy.stats._multivariate (top-level)
missing module named scipy.optimize.brentq - imported by scipy.optimize (delayed), scipy.integrate._ivp.ivp (delayed), scipy.stats._binomtest (top-level), scipy.stats._odds_ratio (top-level)
missing module named scipy.optimize.OptimizeResult - imported by scipy.optimize (top-level), scipy.integrate._bvp (top-level), scipy.integrate._ivp.ivp (top-level), scipy._lib.cobyqa.main (top-level), scipy._lib.cobyqa.problem (top-level), scipy.optimize._lsq.least_squares (top-level), scipy.optimize._lsq.trf (top-level), scipy.optimize._lsq.dogbox (top-level), scipy.optimize._lsq.lsq_linear (top-level), scipy.optimize._lsq.trf_linear (top-level), scipy.optimize._lsq.bvls (top-level), scipy.optimize._spectral (top-level), scipy.optimize._differentialevolution (top-level), scipy.optimize._shgo (top-level), scipy.optimize._dual_annealing (top-level), scipy.optimize._qap (top-level), scipy.optimize._direct_py (top-level)
missing module named scipy.optimize.minimize_scalar - imported by scipy.optimize (top-level), scipy.interpolate._bsplines (top-level), scipy.stats._multicomp (top-level)
missing module named scipy.special.airy - imported by scipy.special (top-level), scipy.special._orthogonal (top-level)
missing module named scipy.linalg.orthogonal_procrustes - imported by scipy.linalg (top-level), scipy.spatial._procrustes (top-level)
missing module named uarray - imported by scipy._lib.uarray (conditional, optional)
missing module named scipy.linalg.cholesky - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.optimize._optimize (top-level), scipy.optimize._minpack_py (top-level), sklearn.gaussian_process._gpc (top-level), sklearn.gaussian_process._gpr (top-level)
missing module named scipy.linalg.cho_solve - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.optimize._trustregion_exact (top-level), scipy.optimize._lsq.common (top-level), sklearn.gaussian_process._gpc (top-level), sklearn.gaussian_process._gpr (top-level)
missing module named scipy.linalg.cho_factor - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.optimize._lsq.common (top-level)
missing module named scipy.linalg.inv - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy.optimize._nonlin (top-level)
missing module named scipy.linalg.lu_solve - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.integrate._ivp.bdf (top-level), scipy.integrate._ivp.radau (top-level)
missing module named scipy.linalg.lu_factor - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.integrate._ivp.bdf (top-level), scipy.integrate._ivp.radau (top-level)
missing module named scipy.linalg.eigh - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.sparse.linalg._eigen.lobpcg.lobpcg (top-level), scipy._lib.cobyqa.models (top-level), sklearn.decomposition._kernel_pca (top-level), sklearn.manifold._locally_linear (top-level), sklearn.manifold._spectral_embedding (top-level)
missing module named scipy.linalg.eig - imported by scipy.linalg (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level)
missing module named scipy.linalg.lstsq - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level), nncf.tensor.functions.numpy_linalg (top-level), scipy.signal._fir_filter_design (top-level), scipy.signal._savitzky_golay (top-level)
missing module named scipy.linalg.qr_insert - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level)
missing module named scipy.linalg.svd - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level), scipy.sparse.linalg._eigen._svds (top-level), scipy.linalg._decomp_polar (top-level), scipy.optimize._minpack_py (top-level), scipy.optimize._lsq.trf (top-level), scipy.optimize._nonlin (top-level), scipy.optimize._remove_redundancy (top-level), sklearn.cluster._spectral (top-level), sklearn.manifold._locally_linear (top-level)
missing module named scipy.linalg.solve - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level), scipy.interpolate._bsplines (top-level), scipy.interpolate._cubic (top-level), scipy.optimize._nonlin (top-level), scipy.optimize._linprog_rs (top-level), sklearn.gaussian_process._gpc (top-level), sklearn.manifold._locally_linear (top-level), scipy.signal._fir_filter_design (top-level)
missing module named scipy.linalg.qr - imported by scipy.linalg (top-level), scipy.sparse.linalg._isolve._gcrotmk (top-level), scipy._lib.cobyqa.subsolvers.optim (top-level), scipy.optimize._lsq.trf (top-level), scipy.optimize._lsq.trf_linear (top-level), scipy.optimize._nonlin (top-level), sklearn.cluster._spectral (top-level), sklearn.manifold._locally_linear (top-level), scipy.signal._ltisys (top-level)
missing module named scikits - imported by scipy.sparse.linalg._dsolve.linsolve (optional)
missing module named scipy.sparse.diags - imported by scipy.sparse (delayed), scipy.sparse.linalg._special_sparse_arrays (delayed)
missing module named scipy.sparse.spdiags - imported by scipy.sparse (delayed), scipy.sparse.linalg._special_sparse_arrays (delayed)
missing module named scipy.sparse.dia_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level)
missing module named scipy.sparse.kron - imported by scipy.sparse (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level)
missing module named scipy.sparse.eye - imported by scipy.sparse (top-level), scipy.sparse.linalg._eigen.arpack.arpack (top-level), scipy.sparse.linalg._special_sparse_arrays (top-level), scipy.integrate._ivp.bdf (top-level), scipy.integrate._ivp.radau (top-level), scipy.optimize._trustregion_constr.equality_constrained_sqp (top-level), scipy.optimize._trustregion_constr.projections (top-level), sklearn.manifold._locally_linear (top-level)
missing module named scipy.sparse.diags_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level)
missing module named scipy.sparse.eye_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level)
missing module named scipy.sparse.csc_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.optimize._milp (top-level), scipy.io._harwell_boeing.hb (top-level)
missing module named scipy.sparse.csr_array - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), scipy.interpolate._bsplines (top-level), scipy.interpolate._ndbspline (top-level)
missing module named scipy.sparse.SparseEfficiencyWarning - imported by scipy.sparse (top-level), scipy.sparse.linalg._dsolve.linsolve (top-level), sklearn.cluster._optics (top-level)
missing module named scipy.stats.iqr - imported by scipy.stats (delayed), scipy.stats._hypotests (delayed)
missing module named dummy_thread - imported by cffi.lock (conditional, optional)
missing module named thread - imported by cffi.lock (conditional, optional), cffi.cparser (conditional, optional)
missing module named cStringIO - imported by cffi.ffiplatform (optional)
missing module named cPickle - imported by pycparser.ply.yacc (delayed, optional)
missing module named cffi._pycparser - imported by cffi (optional), cffi.cparser (optional)
missing module named scipy._distributor_init_local - imported by scipy (optional), scipy._distributor_init (optional)
missing module named traitlets - imported by pandas.io.formats.printing (delayed, conditional)
missing module named 'IPython.core' - imported by sympy.interactive.printing (delayed, optional), pandas.io.formats.printing (delayed, conditional), h5py (delayed, conditional, optional), h5py.ipy_completer (top-level), rich.pretty (delayed, optional)
missing module named IPython - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional, optional), pandas.io.formats.printing (delayed), h5py (delayed, conditional, optional), h5py.ipy_completer (top-level), keras.src.utils.model_visualization (delayed, conditional, optional), keras.src.saving.file_editor (delayed, optional), tensorflow.python.keras.utils.vis_utils (delayed, conditional, optional)
missing module named 'lxml.etree' - imported by openpyxl.xml (delayed, optional), openpyxl.xml.functions (conditional), pandas.io.xml (delayed), pandas.io.formats.xml (delayed), pandas.io.html (delayed), networkx.readwrite.graphml (delayed, optional)
missing module named openpyxl.tests - imported by openpyxl.reader.excel (optional)
missing module named 'odf.config' - imported by pandas.io.excel._odswriter (delayed)
missing module named 'odf.style' - imported by pandas.io.excel._odswriter (delayed)
missing module named 'odf.text' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
missing module named 'odf.table' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
missing module named 'odf.opendocument' - imported by pandas.io.excel._odfreader (delayed), pandas.io.excel._odswriter (delayed)
missing module named xlrd - imported by pandas.io.excel._xlrd (delayed, conditional), pandas.io.excel._base (delayed, conditional)
missing module named pyxlsb - imported by pandas.io.excel._pyxlsb (delayed, conditional)
missing module named 'odf.office' - imported by pandas.io.excel._odfreader (delayed)
missing module named 'odf.element' - imported by pandas.io.excel._odfreader (delayed)
missing module named 'odf.namespaces' - imported by pandas.io.excel._odfreader (delayed)
missing module named odf - imported by pandas.io.excel._odfreader (conditional)
missing module named python_calamine - imported by pandas.io.excel._calamine (delayed, conditional)
missing module named botocore - imported by pandas.io.common (delayed, conditional, optional)
missing module named collections.Mapping - imported by collections (optional), pytz.lazy (optional)
missing module named UserDict - imported by pytz.lazy (optional)
missing module named Foundation - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named AppKit - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named PyQt4 - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named qtpy - imported by pandas.io.clipboard (delayed, conditional, optional)
missing module named 'sqlalchemy.engine' - imported by pandas.io.sql (delayed)
missing module named 'sqlalchemy.types' - imported by pandas.io.sql (delayed, conditional)
missing module named 'sqlalchemy.schema' - imported by pandas.io.sql (delayed)
missing module named 'sqlalchemy.sql' - imported by pandas.io.sql (conditional)
missing module named sqlalchemy - imported by pandas.io.sql (delayed, conditional)
missing module named pandas.core.internals.Block - imported by pandas.core.internals (conditional), pandas.io.pytables (conditional)
missing module named tables - imported by pandas.io.pytables (delayed, conditional)
missing module named lxml - imported by sympy.utilities.mathml (delayed), pandas.io.xml (conditional)
missing module named 'google.auth' - imported by pandas.io.gbq (conditional)
missing module named pandas.Panel - imported by pandas (delayed, optional), tqdm.std (delayed, optional)
missing module named 'lxml.html' - imported by pandas.io.html (delayed)
missing module named bs4 - imported by pandas.io.html (delayed)
missing module named 'pandas.api.internals' - imported by pyarrow.pandas_compat (delayed, conditional)
missing module named 'pyarrow._cuda' - imported by pyarrow.cuda (top-level)
missing module named 'pyarrow.gandiva' - imported by pyarrow.conftest (optional)
missing module named 'pyarrow._azurefs' - imported by pyarrow.fs (optional)
missing module named 'setuptools_scm.git' - imported by pyarrow (delayed, optional)
missing module named setuptools_scm - imported by matplotlib (delayed, conditional, optional), pyarrow (optional), tqdm.version (optional)
missing module named fastparquet - imported by fsspec.parquet (delayed), pyarrow.conftest (optional)
missing module named requests_kerberos - imported by fsspec.implementations.webhdfs (delayed, conditional)
missing module named smbprotocol - imported by fsspec.implementations.smb (top-level)
missing module named smbclient - imported by fsspec.implementations.smb (top-level)
missing module named paramiko - imported by fsspec.implementations.sftp (top-level)
missing module named kerchunk - imported by fsspec.implementations.reference (delayed)
missing module named ujson - imported by fsspec.implementations.cache_metadata (optional), fsspec.implementations.reference (optional)
missing module named 'libarchive.ffi' - imported by fsspec.implementations.libarchive (top-level)
missing module named libarchive - imported by fsspec.implementations.libarchive (top-level)
missing module named uvloop - imported by aiohttp.worker (delayed)
missing module named annotationlib - imported by attr._compat (conditional)
missing module named async_timeout - imported by aiohttp.helpers (conditional), aiohttp.web_ws (conditional), aiohttp.client_ws (conditional)
missing module named 'gunicorn.workers' - imported by aiohttp.worker (top-level)
missing module named gunicorn - imported by aiohttp.worker (top-level)
missing module named aiodns - imported by aiohttp.resolver (optional)
missing module named pygit2 - imported by fsspec.implementations.git (top-level)
missing module named 'distributed.worker' - imported by fsspec.implementations.dask (top-level)
missing module named 'distributed.client' - imported by fsspec.implementations.dask (top-level)
missing module named panel - imported by fsspec.gui (top-level)
missing module named fuse - imported by fsspec.fuse (top-level)
missing module named lz4 - imported by fsspec.compression (optional), joblib.compressor (optional)
missing module named snappy - imported by fsspec.compression (delayed, optional)
missing module named lzmaffi - imported by fsspec.compression (optional)
missing module named isal - imported by fsspec.compression (optional)
missing module named 'IPython.display' - imported by tqdm.notebook (conditional, optional), rich.jupyter (delayed, optional), rich.live (delayed, conditional, optional), huggingface_hub._login (delayed, optional)
missing module named 'IPython.html' - imported by tqdm.notebook (conditional, optional)
missing module named ipywidgets - imported by tqdm.notebook (conditional, optional), rich.live (delayed, conditional, optional)
missing module named boto3 - imported by tensorboard.compat.tensorflow_stub.io.gfile (optional)
missing module named 'botocore.exceptions' - imported by tensorboard.compat.tensorflow_stub.io.gfile (optional)
missing module named tensorboard.compat.notf - imported by tensorboard.compat (delayed, optional)
missing module named 'tensorflow.compat' - imported by tensorboard.util.op_evaluator (delayed), tensorboard.util.encoder (delayed), tensorboard.plugins.audio.summary (delayed), tensorboard.plugins.custom_scalar.summary (delayed), tensorboard.plugins.histogram.summary (delayed), tensorboard.plugins.image.summary (delayed), tensorboard.plugins.pr_curve.summary (delayed), tensorboard.plugins.scalar.summary (delayed), tensorboard.plugins.text.summary (delayed), keras.src.callbacks.tensorboard (delayed)
missing module named 'keras.optimizers.optimizer_v2' - imported by tensorflow.python.saved_model.load (delayed, conditional, optional)
missing module named triton - imported by torch._utils_internal (delayed, conditional), torch._dynamo.logging (conditional, optional), torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed), torch._inductor.runtime.autotune_cache (conditional), torch._inductor.runtime.coordinate_descent_tuner (optional), torch._inductor.runtime.triton_heuristics (conditional, optional), torch._inductor.codegen.wrapper (delayed, conditional), torch._inductor.kernel.mm_common (delayed), torch._inductor.kernel.mm_plus_mm (delayed), torch.sparse._triton_ops_meta (delayed, conditional), torch.sparse._triton_ops (conditional), torch._dynamo.utils (conditional), torch._inductor.compile_worker.__main__ (optional), torch._inductor.runtime.triton_helpers (top-level), torch.testing._internal.triton_utils (conditional)
missing module named 'torch._C._distributed_c10d' - imported by torch.distributed (conditional), torch.distributed.distributed_c10d (top-level), torch.distributed.constants (top-level), torch.distributed.rpc (conditional), torch.distributed.tensor._collective_utils (top-level), torch.distributed._shard.sharded_tensor.reshard (top-level), torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding_bag (top-level), torch.testing._internal.distributed.fake_pg (top-level), torch._dynamo.variables.distributed (delayed), torch.distributed._symmetric_memory (top-level), torch.distributed.elastic.control_plane (delayed), torch.testing._internal.distributed.multi_threaded_pg (top-level)
missing module named torch.randperm - imported by torch (top-level), torch.utils.data.dataset (top-level)
missing module named torch.Generator - imported by torch (top-level), torch.utils.data.dataset (top-level)
missing module named torch.default_generator - imported by torch (top-level), torch.utils.data.dataset (top-level)
missing module named soundfile - imported by torchaudio._backend.soundfile_backend (conditional, optional)
missing module named torch.norm_except_dim - imported by torch (top-level), torch.nn.utils.weight_norm (top-level)
missing module named torch._weight_norm - imported by torch (top-level), torch.nn.utils.weight_norm (top-level)
missing module named 'triton.language' - imported by torch._inductor.codegen.triton_split_scan (delayed), torch._inductor.codegen.wrapper (delayed), torch.sparse._triton_ops (conditional), torch._inductor.runtime.triton_helpers (top-level), torch.testing._internal.triton_utils (conditional)
missing module named 'triton.runtime' - imported by torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed), torch._inductor.runtime.triton_heuristics (conditional), torch._library.triton (delayed), torch._inductor.select_algorithm (delayed, optional), torch._inductor.ir (delayed), torch._dynamo.variables.builder (delayed, conditional), torch._inductor.fx_passes.reinplace (delayed, conditional), torch._inductor.utils (delayed)
missing module named 'triton.compiler' - imported by torch._higher_order_ops.triton_kernel_wrap (delayed), torch.utils._triton (delayed, optional), torch._inductor.runtime.hints (optional), torch._inductor.runtime.triton_heuristics (conditional, optional), torch._inductor.scheduler (delayed), torch._inductor.codegen.triton (delayed), torch._inductor.codecache (delayed, optional), torch._inductor.async_compile (delayed, optional)
missing module named dl - imported by setuptools.command.build_ext (conditional, optional)
missing module named 'Cython.Distutils' - imported by setuptools.command.build_ext (conditional, optional)
missing module named 'win32com.shell' - imported by torch._appdirs (conditional, optional)
missing module named 'com.sun' - imported by torch._appdirs (delayed, conditional, optional)
missing module named com - imported by torch._appdirs (delayed)
missing module named win32api - imported by torch._appdirs (delayed, conditional, optional)
missing module named win32com - imported by torch._appdirs (delayed)
missing module named halide - imported by torch._inductor.codecache (delayed, conditional), torch._inductor.runtime.halide_helpers (optional)
missing module named gmpy2.qdiv - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.lcm - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.gcd - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.gcdext - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.denom - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.numer - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.mpq - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named gmpy2.mpz - imported by gmpy2 (conditional), sympy.polys.domains.groundtypes (conditional)
missing module named 'pyglet.image' - imported by sympy.printing.preview (delayed, optional)
missing module named 'pyglet.window' - imported by sympy.plotting.pygletplot.managed_window (top-level), sympy.plotting.pygletplot.plot_controller (top-level), sympy.printing.preview (delayed, optional)
missing module named pyglet - imported by sympy.plotting.pygletplot.plot (optional), sympy.plotting.pygletplot.plot_axes (top-level), sympy.printing.preview (delayed, conditional, optional), sympy.testing.runtests (delayed, conditional)
missing module named 'pyglet.gl' - imported by sympy.plotting.pygletplot.plot_axes (top-level), sympy.plotting.pygletplot.util (top-level), sympy.plotting.pygletplot.plot_window (top-level), sympy.plotting.pygletplot.plot_camera (top-level), sympy.plotting.pygletplot.plot_rotation (top-level), sympy.plotting.pygletplot.plot_curve (top-level), sympy.plotting.pygletplot.plot_mode_base (top-level), sympy.plotting.pygletplot.plot_surface (top-level)
missing module named 'pyglet.clock' - imported by sympy.plotting.pygletplot.managed_window (top-level)
missing module named 'sage.libs' - imported by mpmath.libmp.backend (conditional, optional), mpmath.libmp.libelefun (conditional, optional), mpmath.libmp.libmpf (conditional, optional), mpmath.libmp.libmpc (conditional, optional), mpmath.libmp.libhyper (delayed, conditional), mpmath.ctx_mp (conditional)
missing module named sage - imported by mpmath.libmp.backend (conditional, optional)
missing module named gmpy - imported by mpmath.libmp.backend (conditional, optional)
missing module named pysat - imported by sympy.logic.algorithms.minisat22_wrapper (delayed)
missing module named pycosat - imported by sympy.logic.algorithms.pycosat_wrapper (delayed)
missing module named flint - imported by sympy.external.gmpy (delayed, optional), sympy.polys.polyutils (conditional), sympy.polys.factortools (conditional), sympy.polys.polyclasses (conditional), sympy.polys.domains.groundtypes (conditional), sympy.polys.domains.finitefield (conditional)
missing module named all - imported by sympy.testing.runtests (delayed, optional)
missing module named 'IPython.Shell' - imported by sympy.interactive.session (delayed, conditional)
missing module named 'IPython.frontend' - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional)
missing module named 'IPython.terminal' - imported by sympy.interactive.printing (delayed, conditional, optional), sympy.interactive.session (delayed, conditional)
missing module named 'IPython.iplib' - imported by sympy.interactive.printing (delayed, optional)
missing module named py - imported by mpmath.tests.runtests (delayed, conditional)
missing module named 'sage.all' - imported by sympy.core.function (delayed)
missing module named 'sage.interfaces' - imported by sympy.core.basic (delayed)
missing module named 'cutlass_library.gemm_operation' - imported by torch._inductor.codegen.cuda.gemm_template (delayed), torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions (conditional)
missing module named 'cutlass_library.library' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional), torch._inductor.codegen.cuda.gemm_template (delayed), torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions (conditional)
missing module named 'cutlass_library.generator' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed)
missing module named 'cutlass_library.manifest' - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional)
missing module named cutlass_library - imported by torch._inductor.codegen.cuda.cutlass_utils (delayed, conditional, optional)
missing module named torch.multiprocessing._prctl_pr_set_pdeathsig - imported by torch.multiprocessing (top-level), torch.multiprocessing.spawn (top-level)
missing module named 'torch.utils._config_typing' - imported by torch._dynamo.config (conditional), torch._inductor.config (conditional), torch._functorch.config (conditional)
missing module named 'torch._C._functorch' - imported by torch._subclasses.fake_tensor (top-level), torch._subclasses.meta_utils (top-level), torch._functorch.pyfunctorch (top-level), torch._higher_order_ops.cond (top-level), torch._functorch.autograd_function (top-level), torch._functorch.utils (top-level), torch._functorch.vmap (top-level), torch._functorch.eager_transforms (top-level)
missing module named torch.trunc - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.tanh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.tan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.square - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sqrt - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sinh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sin - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.signbit - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.sign - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.round - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.reciprocal - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.rad2deg - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.negative - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.logical_not - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log2 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log1p - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log10 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.log - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.isnan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.isinf - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.isfinite - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.floor - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.expm1 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.exp2 - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.exp - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.deg2rad - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.cosh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.cos - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.conj_physical - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.ceil - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.bitwise_not - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arctanh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arctan - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arcsinh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arcsin - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arccosh - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.arccos - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.absolute - imported by torch (top-level), torch._numpy._unary_ufuncs_impl (top-level)
missing module named torch.true_divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.subtract - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.remainder - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.pow - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.not_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.nextafter - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.multiply - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.minimum - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.maximum - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.logical_xor - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.logical_or - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.logical_and - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.logaddexp2 - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.logaddexp - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.less_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.less - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.ldexp - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.lcm - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.hypot - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.heaviside - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.greater_equal - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.greater - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.gcd - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.fmod - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.fmin - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.fmax - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.floor_divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.float_power - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.eq - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.divide - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.copysign - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.bitwise_xor - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.bitwise_right_shift - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.bitwise_or - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.bitwise_left_shift - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.bitwise_and - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.arctan2 - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch.add - imported by torch (top-level), torch._numpy._binary_ufuncs_impl (top-level)
missing module named torch_xla - imported by torch._functorch.fx_minifier (delayed), huggingface_hub.serialization._torch (delayed, conditional)
missing module named deeplearning - imported by torch._inductor.fx_passes.group_batch_fusion (optional)
missing module named torch._inductor.fx_passes.fb - imported by torch._inductor.fx_passes (delayed, conditional), torch._inductor.fx_passes.pre_grad (delayed, conditional)
missing module named 'torch_xla.distributed' - imported by torch.distributed.tensor._api (delayed, conditional, optional)
missing module named torchdistx - imported by torch.distributed.fsdp._init_utils (optional)
missing module named 'torch._C._distributed_rpc' - imported by torch.distributed.rpc (conditional), torch.distributed.rpc.api (top-level), torch.distributed.rpc.constants (top-level), torch.distributed.rpc.internal (top-level), torch.distributed.rpc.options (top-level), torch._jit_internal (conditional)
missing module named foo - imported by torch._functorch.compilers (delayed)
missing module named torch.broadcast_shapes - imported by torch (top-level), torch._numpy._funcs_impl (top-level)
missing module named torch._numpy.float_ - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.max - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.isnan - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.signbit - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.real - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.isscalar - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.iscomplexobj - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.imag - imported by torch._numpy (delayed), torch._numpy.testing.utils (delayed)
missing module named torch._numpy.intp - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
missing module named torch._numpy.empty - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
missing module named torch._numpy.arange - imported by torch._numpy (top-level), torch._numpy.testing.utils (top-level)
missing module named 'onnxscript._framework_apis' - imported by torch.onnx._internal._exporter_legacy (delayed), torch.onnx._internal._lazy_import (conditional)
missing module named onnxscript - imported by torch.onnx._internal.fx.registration (conditional), torch.onnx._internal._exporter_legacy (delayed, conditional, optional), torch.onnx._internal.fx.diagnostics (top-level), torch.onnx._internal.fx.onnxfunction_dispatcher (conditional), torch.onnx._internal.fx.fx_onnx_interpreter (top-level), torch.onnx._internal.onnxruntime (delayed, conditional, optional), torch.onnx._internal._lazy_import (conditional), torch.onnx._internal.exporter._core (top-level), torch.onnx._internal.exporter._dispatching (top-level), torch.onnx._internal.exporter._schemas (top-level), torch.onnx._internal.exporter._registration (conditional), torch.onnx._internal.exporter._building (top-level), torch.onnx._internal.exporter._tensors (top-level), torch.onnx._internal.exporter._ir_passes (top-level), torch.onnx._internal.exporter._reporting (conditional)
missing module named 'onnx.onnx_cpp2py_export.defs' - imported by onnx.defs (top-level), onnx.reference.ops._op_list (top-level)
missing module named 'onnx.onnx_cpp2py_export.version_converter' - imported by onnx.version_converter (top-level)
missing module named 'onnx.onnx_cpp2py_export.shape_inference' - imported by onnx.shape_inference (top-level)
missing module named 'onnx.onnx_cpp2py_export.printer' - imported by onnx.printer (top-level)
missing module named 'onnx.onnx_cpp2py_export.parser' - imported by onnx.parser (top-level)
missing module named 'onnx.onnx_cpp2py_export.checker' - imported by onnx.checker (top-level)
missing module named pyinstrument - imported by torch.onnx._internal.exporter._core (delayed, conditional)
missing module named 'onnxscript.ir' - imported by torch.onnx._internal.exporter._core (top-level), torch.onnx._internal.exporter._building (top-level)
missing module named 'onnxscript.evaluator' - imported by torch.onnx._internal.exporter._core (top-level)
missing module named onnxruntime.capi.build_and_package_info - imported by onnxruntime.capi.onnxruntime_validation (delayed, conditional, optional)
missing module named 'onnxruntime.training' - imported by onnxruntime.capi.onnxruntime_validation (delayed, optional)
missing module named 'onnxscript.function_libs' - imported by torch.onnx._internal.fx.diagnostics (top-level), torch.onnx._internal.fx.onnxfunction_dispatcher (conditional), torch.onnx._internal.fx.decomposition_skip (top-level), torch.onnx._internal.fx.fx_onnx_interpreter (top-level), torch.onnx._internal.exporter._ir_passes (delayed, optional)
missing module named 'onnx.defs.OpSchema' - imported by torch.onnx._internal.fx.type_utils (conditional)
missing module named transformers - imported by torch._dynamo.variables.dicts (delayed), torch.onnx._internal.fx.patcher (delayed, conditional, optional), torch.onnx._internal.fx.dynamo_graph_extractor (delayed, optional), nncf.data.generators (delayed, optional), torch.testing._internal.common_distributed (delayed, optional)
missing module named accimage - imported by torchvision.transforms.transforms (optional), torchvision.transforms.functional (optional), torchvision.transforms._functional_pil (optional), torchvision.datasets.folder (delayed)
missing module named torch.ao.quantization.QuantStub - imported by torch.ao.quantization (top-level), torchvision.models.quantization.mobilenetv2 (top-level), torchvision.models.quantization.mobilenetv3 (top-level), torch.testing._internal.common_quantization (top-level)
missing module named torch.ao.quantization.DeQuantStub - imported by torch.ao.quantization (top-level), torchvision.models.quantization.mobilenetv2 (top-level), torchvision.models.quantization.mobilenetv3 (top-level), torch.testing._internal.common_quantization (top-level)
missing module named 'monkeytype.tracing' - imported by torch.jit._monkeytype_config (optional)
missing module named 'monkeytype.db' - imported by torch.jit._monkeytype_config (optional)
missing module named 'monkeytype.config' - imported by torch.jit._monkeytype_config (optional)
missing module named monkeytype - imported by torch.jit._monkeytype_config (optional)
missing module named 'torch._C._jit_tree_views' - imported by torch._sources (top-level), torch.jit.frontend (top-level)
missing module named wcwidth - imported by tabulate (optional)
missing module named torch.ao.quantization.QConfig - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.qconfig_mapping_utils (top-level), torch.ao.quantization.fx.lstm_utils (top-level), torch.testing._internal.common_quantization (top-level)
missing module named torch.ao.quantization.QConfigMapping - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.custom_config (top-level), torch.ao.ns.fx.n_shadows_utils (top-level), torch.ao.ns.fx.qconfig_multi_mapping (top-level), torch.ao.ns._numeric_suite_fx (top-level), torch.ao.quantization.fx.lstm_utils (top-level), torch.ao.quantization.pt2e.prepare (top-level), torch.testing._internal.common_quantization (top-level)
missing module named torch.ao.quantization.QuantType - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.utils (top-level), torch.testing._internal.common_quantization (top-level)
missing module named torch.ao.quantization.QConfigAny - imported by torch.ao.quantization (top-level), torch.ao.quantization.fx.utils (top-level)
missing module named torch.ao.quantization.float_qparams_weight_only_qconfig - imported by torch.ao.quantization (delayed, conditional), torch.ao.nn.quantized.modules.embedding_ops (delayed, conditional), torch.testing._internal.common_quantization (top-level)
missing module named pycocotools - imported by torchvision.datasets.coco (delayed), torchvision.tv_tensors._dataset_wrapper (delayed)
missing module named gdown - imported by torchvision.datasets.utils (delayed, optional)
missing module named 'IPython.utils' - imported by h5py.ipy_completer (top-level)
missing module named mpi4py - imported by h5py._hl.files (delayed)
missing module named lmdb - imported by torchvision.datasets.lsun (delayed)
missing module named 'onnxscript.rewriter' - imported by torch.onnx._internal.onnxruntime (delayed, conditional, optional)
missing module named 'torch._C._onnx' - imported by torch.onnx (top-level), torch.onnx.utils (top-level), torch.onnx.symbolic_helper (top-level), torch.onnx._globals (top-level), torch.onnx.symbolic_opset9 (top-level), torch.onnx.symbolic_opset10 (top-level), torch.onnx.symbolic_opset13 (top-level), torch.onnx._experimental (top-level), torch.onnx.verification (top-level)
missing module named torchrec - imported by torch._dynamo.variables.user_defined (delayed)
missing module named 'torch._C._lazy_ts_backend' - imported by torch._lazy.ts_backend (top-level), torch._lazy.computation (top-level)
missing module named 'torch._C._lazy' - imported by torch._lazy (top-level), torch._lazy.device_context (top-level), torch._lazy.metrics (top-level), torch._lazy.computation (top-level), torch._lazy.config (top-level), torch._lazy.debug (top-level), torch._lazy.ir_cache (top-level)
missing module named hypothesis - imported by torch.testing._internal.common_utils (optional), torch.testing._internal.hypothesis_utils (top-level)
missing module named 'numba.cuda' - imported by torch.testing._internal.common_cuda (conditional, optional)
missing module named 'xmlrunner.result' - imported by torch.testing._internal.common_utils (delayed, conditional)
missing module named xmlrunner - imported by torch.testing._internal.common_utils (delayed, conditional)
missing module named expecttest - imported by torch.testing._internal.common_utils (top-level)
missing module named '_pytest.recwarn' - imported by torch._dynamo.variables.user_defined (delayed, optional)
missing module named _pytest - imported by torch._dynamo.variables.user_defined (delayed, optional)
missing module named 'torch._C._dynamo' - imported by torch._guards (top-level), torch._dynamo.convert_frame (top-level), torch._dynamo.guards (top-level), torch._dynamo.eval_frame (top-level), torch._dynamo.decorators (conditional), torch._dynamo.types (top-level)
missing module named pygraphviz - imported by networkx.drawing.nx_agraph (delayed, optional)
missing module named 'triton.backends' - imported by torch._inductor.runtime.triton_heuristics (conditional, optional)
missing module named 'triton.testing' - imported by torch._inductor.runtime.benchmarking (delayed, optional), torch._inductor.utils (delayed)
missing module named 'torch_xla.core' - imported by torch._dynamo.testing (delayed, conditional), huggingface_hub.serialization._torch (delayed, conditional, optional), torch._dynamo.backends.torchxla (delayed, optional)
missing module named torch.float16 - imported by torch (delayed, conditional), torch._inductor.codegen.cpp_wrapper_cuda (delayed, conditional)
missing module named torch.bfloat16 - imported by torch (delayed, conditional), torch._inductor.codegen.cpp_wrapper_cuda (delayed, conditional)
missing module named torch.ScriptObject - imported by torch (delayed), torch.export.graph_signature (delayed)
missing module named moviepy - imported by torch.utils.tensorboard.summary (delayed, optional)
missing module named 'torch._C._monitor' - imported by torch.monitor (top-level)
missing module named 'libfb.py' - imported by torch._dynamo.debug_utils (conditional), torch._inductor.codecache (delayed, conditional), torch._inductor.compile_worker.subproc_pool (delayed, conditional)
missing module named 'torch._inductor.fb' - imported by torch._inductor.runtime.autotune_cache (delayed, conditional, optional), torch._inductor.cpp_builder (conditional), torch._inductor.graph (conditional), torch._inductor.codecache (delayed, conditional, optional), torch._inductor.compile_fx (delayed, conditional, optional)
missing module named 'triton.fb' - imported by torch._inductor.cpp_builder (conditional), torch._inductor.codecache (conditional)
missing module named rfe - imported by torch._inductor.remote_cache (conditional)
missing module named redis - imported by torch._inductor.remote_cache (optional)
missing module named 'ck4inductor.universal_gemm' - imported by torch._inductor.utils (delayed, optional)
missing module named ck4inductor - imported by torch._inductor.utils (delayed, optional)
missing module named libfb - imported by torch._inductor.config (conditional, optional)
missing module named amdsmi - imported by torch.cuda (conditional, optional), torch.cuda.memory (delayed, conditional, optional)
missing module named pynvml - imported by torch.cuda (delayed, conditional, optional), torch.cuda.memory (delayed, conditional, optional)
missing module named torch.device - imported by torch (top-level), torch.types (top-level), torch.nn.modules.module (top-level), torch.cuda (top-level), torch.xpu (top-level), torch._inductor.graph (top-level), torch.distributed.nn.api.remote_module (top-level), torch._library.infer_schema (top-level), torch.cpu (top-level), torch.mtia (top-level)
missing module named 'torch._C._profiler' - imported by torch.utils._traceback (delayed), torch.profiler (top-level), torch.autograd.profiler (top-level), torch.profiler.profiler (top-level), torch.profiler._memory_profiler (top-level), torch.cuda._memory_viz (delayed), torch.testing._internal.logging_tensor (top-level), torch.autograd (top-level), torch.profiler._pattern_matcher (top-level)
missing module named 'torch._C._autograd' - imported by torch._subclasses.meta_utils (top-level), torch.profiler (top-level), torch.profiler._memory_profiler (top-level), torch.autograd (top-level)
missing module named z3 - imported by torch.fx.experimental.validator (optional), torch.fx.experimental.migrate_gradual_types.transform_to_z3 (optional), torch.fx.experimental.migrate_gradual_types.z3_types (optional)
missing module named torch.Size - imported by torch (top-level), torch.types (top-level), torch.nn.modules.normalization (top-level)
missing module named torch.nn.Sequential - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
missing module named torch.nn.ParameterList - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
missing module named torch.nn.ParameterDict - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
missing module named torch.nn.ModuleList - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
missing module named torch.nn.ModuleDict - imported by torch.nn (top-level), torch.testing._internal.common_utils (top-level)
missing module named torch.nn.ReLU - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
missing module named torch.nn.Linear - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
missing module named torch.nn.Conv3d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
missing module named torch.nn.Conv2d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
missing module named torch.nn.Conv1d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
missing module named torch.nn.BatchNorm3d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
missing module named torch.nn.BatchNorm2d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
missing module named torch.nn.BatchNorm1d - imported by torch.nn (top-level), torch.ao.nn.intrinsic.modules.fused (top-level)
missing module named torch.nn.Module - imported by torch.nn (top-level), torch.optim.swa_utils (top-level), torch.ao.quantization.fake_quantize (top-level), torch.jit._recursive (top-level), torch.jit._script (top-level), torch.jit._trace (top-level), torch._dynamo.mutation_guard (top-level), torch.fx.passes.utils.common (top-level), torch.distributed.nn.api.remote_module (top-level), torchaudio.models.wav2vec2.utils.import_fairseq (top-level), torchaudio.models.wav2vec2.model (top-level), torchaudio.models.wav2vec2.components (top-level), torchaudio.models.wav2vec2.utils.import_huggingface (top-level), torchaudio.pipelines._wav2vec2.impl (top-level), torch.fx.experimental.proxy_tensor (top-level), nncf.torch.utils (top-level), nncf.torch.debug (top-level), nncf.common.factory (delayed, conditional), nncf.torch.model_creation (top-level)
missing module named torch.qscheme - imported by torch (top-level), torch.types (top-level)
missing module named torch.layout - imported by torch (top-level), torch.types (top-level)
missing module named torch.DispatchKey - imported by torch (top-level), torch.types (top-level)
missing module named torchaudio._internal.fb - imported by torchaudio._internal (optional)
missing module named sentencepiece - imported by torchaudio.pipelines.rnnt_pipeline (delayed)
missing module named dp - imported by torchaudio.pipelines._tts.utils (delayed)
missing module named kaldi_io - imported by torchaudio.kaldi_io (delayed)
missing module named av.video._VideoCodecName - imported by av.video (top-level), av.codec.context (top-level), av.container.output (top-level)
missing module named 'cython.cimports' - imported by av.packet (top-level), av.audio.codeccontext (top-level), av.filter.loudnorm (top-level)
missing module named av.audio._AudioCodecName - imported by av.audio (top-level), av.codec.context (top-level), av.container.output (top-level)
missing module named torcharrow - imported by torch.utils.data.datapipes.iter.callable (delayed, conditional, optional)
missing module named _dbm - imported by dbm.ndbm (top-level)
missing module named _gdbm - imported by dbm.gnu (top-level)
missing module named diff - imported by dill._dill (delayed, conditional, optional)
missing module named dill.diff - imported by dill (delayed, conditional, optional), dill._dill (delayed, conditional, optional)
missing module named version - imported by dill (optional)
missing module named 'jax.typing' - imported by optree.integrations.jax (top-level)
missing module named 'jax._src' - imported by optree.integrations.jax (top-level), keras.src.backend.jax.nn (delayed, optional)
missing module named 'torch._C._distributed_autograd' - imported by torch.distributed.autograd (conditional)
missing module named 'einops._torch_specific' - imported by torch._dynamo.decorators (delayed, optional)
missing module named einops - imported by torch._dynamo.decorators (delayed)
missing module named keras.src.backend.random_seed_dtype - imported by keras.src.backend (delayed), keras.src.random.seed_generator (delayed)
missing module named keras.src.backend.convert_to_tensor - imported by keras.src.backend (delayed), keras.src.random.seed_generator (delayed)
missing module named 'openvino._pyopenvino.util' - imported by openvino.utils (delayed), openvino.runtime.utils (top-level)
missing module named 'openvino._pyopenvino.op' - imported by openvino.runtime.op (top-level), openvino.runtime.op.util (top-level), nncf.openvino.optimized_functions.models (top-level)
missing module named 'jax.nn' - imported by keras.src.backend.jax.nn (delayed, optional)
missing module named 'jax.scipy' - imported by keras.src.backend.jax.linalg (top-level)
missing module named 'tensorflow.experimental' - imported by keras.src.backend.tensorflow.distribution_lib (top-level)
missing module named pygments.lexers.PrologLexer - imported by pygments.lexers (top-level), pygments.lexers.cplint (top-level)
missing module named ctags - imported by pygments.formatters.html (optional)
missing module named linkify_it - imported by markdown_it.main (optional)
missing module named 'tensorflow.saved_model' - imported by keras.src.export.saved_model (delayed)
missing module named 'tensorflow.summary' - imported by keras.src.callbacks.tensorboard (delayed, conditional)
missing module named pydantic - imported by huggingface_hub.utils._runtime (delayed, optional), huggingface_hub._webhooks_payload (conditional)
missing module named 'google.colab' - imported by huggingface_hub.utils._auth (delayed, optional)
missing module named hf_transfer - imported by huggingface_hub.file_download (delayed, conditional, optional), huggingface_hub.lfs (delayed, optional)
missing module named hf_xet - imported by huggingface_hub.file_download (delayed, optional), huggingface_hub._commit_api (delayed)
missing module named 'mcp.client' - imported by huggingface_hub.inference._mcp.mcp_client (delayed, conditional)
missing module named mcp - imported by huggingface_hub.inference._mcp.utils (conditional), huggingface_hub.inference._mcp.mcp_client (delayed, conditional)
missing module named fastai - imported by huggingface_hub.fastai_utils (delayed)
missing module named 'fastapi.responses' - imported by huggingface_hub._oauth (delayed, optional), huggingface_hub._webhooks_server (conditional)
missing module named fastapi - imported by huggingface_hub._oauth (delayed, conditional, optional), huggingface_hub._webhooks_server (conditional)
missing module named gradio - imported by huggingface_hub._webhooks_server (delayed, conditional)
missing module named tensorboardX - imported by huggingface_hub._tensorboard_logger (conditional, optional)
missing module named 'starlette.datastructures' - imported by huggingface_hub._oauth (delayed, optional)
missing module named 'authlib.integrations' - imported by huggingface_hub._oauth (delayed, optional)
missing module named authlib - imported by huggingface_hub._oauth (delayed, optional)
missing module named starlette - imported by huggingface_hub._oauth (delayed, optional)
missing module named 'ipywidgets.widgets' - imported by huggingface_hub._login (delayed, optional)
missing module named 'InquirerPy.separator' - imported by huggingface_hub.commands.delete_cache (optional)
missing module named 'InquirerPy.base' - imported by huggingface_hub.commands.delete_cache (optional)
missing module named InquirerPy - imported by huggingface_hub.commands.delete_cache (optional)
missing module named pydotplus - imported by keras.src.utils.model_visualization (optional), tensorflow.python.keras.utils.vis_utils (optional)
missing module named pydot_ng - imported by keras.src.utils.model_visualization (optional), tensorflow.python.keras.utils.vis_utils (optional)
missing module named keras.src.ops.convert_to_tensor - imported by keras.src.ops (top-level), keras.src.utils.torch_utils (top-level)
missing module named keras.src.ops.convert_to_numpy - imported by keras.src.ops (top-level), keras.src.utils.torch_utils (top-level)
missing module named keras.src.backend.random - imported by keras.src.backend (top-level), keras.src.ops (top-level), keras.src.testing.test_case (delayed), keras.src.initializers.random_initializers (top-level)
missing module named keras.src.backend.is_tensor - imported by keras.src.backend (top-level), keras.src.ops (top-level)
missing module named keras.src.backend.cond - imported by keras.src.backend (top-level), keras.src.ops (top-level)
missing module named keras.src.backend.cast - imported by keras.src.backend (top-level), keras.src.ops (top-level)
missing module named keras.src.engine - imported by keras.src (conditional), nncf.tensorflow.tf_internals (conditional)
missing module named keras.engine - imported by keras (conditional), nncf.tensorflow.tf_internals (conditional)
missing module named flax - imported by keras.src.utils.jax_layer (delayed)
missing module named array_api_strict - imported by sklearn.utils._array_api (delayed, conditional, optional)
missing module named sklearn.externals.array_api_compat.common.array_namespace - imported by sklearn.externals.array_api_compat.common (top-level), sklearn.externals.array_api_compat.dask.array._aliases (top-level)
missing module named 'cupy_backends.cuda' - imported by sklearn.externals.array_api_compat.common._helpers (delayed)
missing module named torch.outer - imported by torch (top-level), sklearn.externals.array_api_compat.torch.linalg (top-level)
missing module named 'cupy.linalg' - imported by sklearn.externals.array_api_compat.cupy.linalg (top-level)
missing module named 'cupy.fft' - imported by sklearn.externals.array_api_compat.cupy.fft (top-level)
missing module named array_api_compat - imported by sklearn.externals.array_api_extra._lib._utils._compat (optional)
missing module named 'numpydoc.docscrape' - imported by sklearn.utils._testing (delayed)
missing module named numpydoc - imported by sklearn.utils._testing (delayed, optional)
missing module named 'distributed.utils' - imported by joblib._dask (conditional, optional)
missing module named 'dask.utils' - imported by joblib._dask (conditional)
missing module named 'dask.sizeof' - imported by joblib._dask (conditional)
missing module named 'dask.distributed' - imported by joblib._dask (conditional)
missing module named viztracer - imported by joblib.externals.loky.initializers (delayed, optional)
missing module named 'lz4.frame' - imported by joblib.compressor (optional)
missing module named pyamg - imported by sklearn.manifold._spectral_embedding (delayed, conditional, optional)
missing module named 'tf_keras.optimizers' - imported by tensorflow.python.saved_model.load (delayed, conditional, optional)
missing module named tf_keras - imported by tensorflow.python.util.lazy_loader (delayed, conditional, optional), huggingface_hub.keras_mixin (conditional, optional), tensorflow.python.saved_model.load (delayed, conditional, optional)
missing module named objgraph - imported by tensorflow.python.distribute.test_util (optional)
missing module named tblib - imported by tensorflow.python.distribute.multi_process_runner (optional)
missing module named tensorflow.python.framework.fast_tensor_util - imported by tensorflow.python.framework (optional), tensorflow.python.framework.tensor_util (optional)
missing module named portpicker - imported by tensorflow.python.framework.test_util (delayed), tensorflow.dtensor.python.tests.multi_client_test_util (top-level), tensorflow.python.debug.lib.grpc_debug_test_server (top-level)
missing module named 'tensorflow.python.framework.is_mlir_bridge_test_true' - imported by tensorflow.python.framework.test_util (optional)
missing module named 'tensorflow.python.framework.is_mlir_bridge_test_false' - imported by tensorflow.python.framework.test_util (optional)
missing module named 'tensorflow.python.framework.is_xla_test_true' - imported by tensorflow.python.framework.test_util (optional)
missing module named 'six.moves.urllib.request' - imported by tensorflow.python.keras.utils.data_utils (top-level)
missing module named tensorflow.python.keras.__version__ - imported by tensorflow.python.keras (delayed), tensorflow.python.keras.saving.saving_utils (delayed), tensorflow.python.keras.saving.hdf5_format (delayed), tensorflow.python.keras.engine.training (delayed)
missing module named tensorflow.python.keras.layers.wrappers - imported by tensorflow.python.keras.layers (delayed), tensorflow.python.keras.utils.vis_utils (delayed)
missing module named 'tensorflow.python.training.tracking' - imported by openvino.frontend.tensorflow.utils (delayed, optional)
missing module named paddle - imported by openvino.tools.ovc.moc_frontend.shape_utils (delayed, conditional), openvino.tools.ovc.moc_frontend.type_utils (delayed, conditional), openvino.tools.ovc.moc_frontend.paddle_frontend_utils (delayed, optional), openvino.tools.ovc.convert_impl (delayed, conditional)
missing module named 'conda.cli' - imported by torch.utils.benchmark.examples.blas_compare_setup (optional)
missing module named conda - imported by torch.utils.benchmark.examples.blas_compare_setup (optional)
missing module named 'hypothesis.strategies' - imported by torch.testing._internal.hypothesis_utils (top-level)
missing module named 'hypothesis.extra' - imported by torch.testing._internal.hypothesis_utils (top-level)
missing module named torch.tensor - imported by torch (top-level), torch.utils.benchmark.utils.compare (top-level)
missing module named torch.TensorType - imported by torch (top-level), torch.jit._passes._property_propagation (top-level)
missing module named 'torch._C._distributed_rpc_testing' - imported by torch.distributed.rpc._testing (conditional)
missing module named etcd - imported by torch.distributed.elastic.rendezvous.etcd_rendezvous (top-level), torch.distributed.elastic.rendezvous.etcd_store (top-level), torch.distributed.elastic.rendezvous.etcd_rendezvous_backend (top-level), torch.distributed.elastic.rendezvous.etcd_server (optional)
missing module named 'torch.distributed.elastic.metrics.static_init' - imported by torch.distributed.elastic.metrics (optional)
missing module named 'coremltools.models' - imported by torch.backends._coreml.preprocess (top-level)
missing module named 'coremltools.converters' - imported by torch.backends._coreml.preprocess (top-level)
missing module named coremltools - imported by torch.backends._coreml.preprocess (top-level)
missing module named pytorch_lightning - imported by torch.ao.pruning._experimental.data_sparsifier.lightning.callbacks.data_sparsity (top-level)
missing module named fbscribelogger - imported by torch._logging.scribe (optional)
missing module named 'tvm.contrib' - imported by torch._dynamo.backends.tvm (delayed)
missing module named tvm - imported by torch._dynamo.backends.tvm (delayed, conditional)
missing module named 'torch._C._VariableFunctions' - imported by torch (conditional)
|
||||
missing module named 'tensorflow.contrib' - imported by tensorflow.python.tools.import_pb_to_tensorboard (optional)
|
||||
missing module named memory_profiler - imported by tensorflow.python.eager.memory_tests.memory_test_util (optional)
|
||||
missing module named six.moves.urllib.request - imported by six.moves.urllib (top-level), tensorflow.python.distribute.failure_handling.failure_handling_util (top-level)
|
||||
missing module named grpc_reflection - imported by grpc (optional)
|
||||
missing module named grpc_health - imported by grpc (optional)
|
||||
missing module named grpc_tools - imported by grpc._runtime_protos (delayed, optional), grpc (optional)
|
||||
missing module named 'grpc_tools.protoc' - imported by grpc._runtime_protos (delayed, conditional)
|
||||
missing module named tflite_runtime - imported by tensorflow.lite.python.metrics.metrics (conditional), tensorflow.lite.python.interpreter (conditional), tensorflow.lite.python.analyzer (conditional), tensorflow.lite.tools.visualize (conditional)
|
||||
missing module named awq - imported by openvino.frontend.pytorch.quantized (delayed, conditional, optional)
|
||||
missing module named 'transformers.pytorch_utils' - imported by openvino.frontend.pytorch.patch_model (delayed, optional)
|
||||
missing module named 'jax.lax' - imported by openvino.frontend.jax.passes (top-level)
|
||||
missing module named 'jax.core' - imported by openvino.frontend.jax.jaxpr_decoder (top-level)
|
||||
missing module named 'keras.src.utils.control_flow_util' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.src.engine.keras_tensor' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.utils.control_flow_util' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named 'keras.engine.keras_tensor' - imported by nncf.tensorflow.tf_internals (conditional)
|
||||
missing module named rpds.List - imported by rpds (top-level), referencing._core (top-level)
|
||||
missing module named rpds.HashTrieSet - imported by rpds (top-level), referencing._core (top-level)
|
||||
missing module named rpds.HashTrieMap - imported by rpds (top-level), referencing._core (top-level), jsonschema._types (top-level), jsonschema.validators (top-level)
|
||||
missing module named isoduration - imported by jsonschema._format (top-level)
|
||||
missing module named uri_template - imported by jsonschema._format (top-level)
|
||||
missing module named jsonpointer - imported by jsonschema._format (top-level)
|
||||
missing module named webcolors - imported by jsonschema._format (top-level)
|
||||
missing module named rfc3339_validator - imported by jsonschema._format (top-level)
|
||||
missing module named rfc3986_validator - imported by jsonschema._format (optional)
|
||||
missing module named rfc3987 - imported by jsonschema._format (optional)
|
||||
missing module named fqdn - imported by jsonschema._format (top-level)
|
||||
missing module named openvino.properties.hint.inference_precision - imported by openvino.properties.hint (top-level), nncf.quantization.algorithms.accuracy_control.openvino_backend (top-level), nncf.openvino.engine (top-level)
|
||||
missing module named 'openvino._pyopenvino.properties' - imported by openvino.runtime.properties (top-level), openvino.runtime.properties.hint (top-level), openvino.properties (top-level), openvino.properties.hint (top-level), openvino.properties.intel_cpu (top-level), openvino.properties.intel_gpu (top-level), openvino.properties.intel_auto (top-level), openvino.properties.device (top-level), openvino.properties.log (top-level), openvino.properties.streams (top-level), nncf.openvino.optimized_functions.models (top-level)
|
||||
missing module named 'openvino._pyopenvino._offline_transformations' - imported by openvino._offline_transformations (top-level)
|
||||
missing module named 'transformers.utils' - imported by nncf.data.generators (delayed, optional)
|
||||
missing module named icu - imported by natsort.compat.locale (optional), natsort.natsort (conditional, optional)
|
||||
missing module named fastnumbers - imported by natsort.compat.fastnumbers (conditional, optional)
|
||||
missing module named 'openvino._pyopenvino.preprocess' - imported by openvino.preprocess (top-level)
|
||||
missing module named ui - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
|
||||
missing module named splash - imported by D:\Downloads\qt_app_pyside\khatam\qt_app_pyside\main.py (delayed, optional)
|
||||
BIN
qt_app_pyside1/build/TrafficMonitor/xref-TrafficMonitor.html
LFS
Normal file
BIN
qt_app_pyside1/build/TrafficMonitor/xref-TrafficMonitor.html
LFS
Normal file
Binary file not shown.
93
qt_app_pyside1/build_analysis_report.md
Normal file
93
qt_app_pyside1/build_analysis_report.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# 🔍 PyInstaller Build Analysis Report
*Generated: July 5, 2025*

## 🚨 Critical Issues Identified

### 1. **Hidden Import Failures**
- **ERROR**: `ui.main_window` not found
- **ERROR**: `controllers` not found
- **CAUSE**: PyInstaller cannot find these modules as packages
- **IMPACT**: Runtime import failures for UI and controller modules

### 2. **Module Structure Issues**
- **PROBLEM**: Treating folders as modules without proper `__init__.py` files
- **AFFECTED**: `ui/`, `controllers/`, `utils/` directories
- **CONSEQUENCE**: Import resolution failures

### 3. **Massive Dependencies**
- **SIZE**: Build includes TensorFlow (2.19.0), PyTorch (2.5.1), scikit-learn, etc.
- **IMPACT**: ~800MB+ executable with unnecessary ML libraries
- **BLOAT**: Most dependencies unused by traffic monitoring app

### 4. **Deprecation Warnings**
- **TorchScript**: Multiple deprecation warnings
- **torch.distributed**: Legacy API warnings
- **NNCF**: Version mismatch warnings (torch 2.5.1 vs recommended 2.6.*)

## ✅ Successful Components
- ✓ PySide6 Qt framework detected and integrated
- ✓ OpenCV (cv2) hooks processed successfully
- ✓ NumPy and core scientific libraries included
- ✓ Build completed without fatal errors

## 🛠️ Recommended Fixes

### **Immediate Fixes**
1. **Add `__init__.py` files** to make directories proper Python packages
2. **Fix hidden imports** with correct module paths
3. **Exclude unused dependencies** to reduce size
4. **Add specific imports** for UI components

### **Optimized Build Command**
```bash
pyinstaller --onefile --console --name=FixedDebug ^
--add-data="ui;ui" ^
--add-data="controllers;controllers" ^
--add-data="utils;utils" ^
--add-data="config.json;." ^
--hidden-import=ui.main_window ^
--hidden-import=controllers.video_controller_new ^
--hidden-import=utils.crosswalk_utils_advanced ^
--hidden-import=utils.traffic_light_utils ^
--hidden-import=cv2 ^
--hidden-import=openvino ^
--hidden-import=numpy ^
--hidden-import=PySide6.QtCore ^
--hidden-import=PySide6.QtWidgets ^
--hidden-import=PySide6.QtGui ^
--exclude-module=tensorflow ^
--exclude-module=torch ^
--exclude-module=sklearn ^
--exclude-module=matplotlib ^
--exclude-module=pandas ^
main.py
```

### **Size Optimization**
- **Current**: ~800MB+ with ML libraries
- **Optimized**: ~200-300MB without unused dependencies
- **Core only**: PySide6 + OpenVINO + OpenCV + app code

## 🎯 Runtime Risk Assessment

### **High Risk**
- UI module import failures
- Controller module missing
- Configuration file access issues

### **Medium Risk**
- Missing utility modules
- OpenVINO model loading
- Resource file access

### **Low Risk**
- Core PySide6 functionality
- OpenCV operations
- Basic Python libraries

## 📋 Next Steps
1. Create missing `__init__.py` files (a sketch follows right after this report)
2. Test optimized build command
3. Run executable and capture any runtime errors
4. Verify all UI components load correctly
5. Test complete pipeline functionality
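As a companion to step 1 above, here is a minimal sketch of creating the missing package markers. It assumes the `ui/`, `controllers/` and `utils/` folders sit next to `main.py`; adjust the list if the layout differs, and the file name `create_init_files.py` is only illustrative.

```python
# create_init_files.py - one-off helper (hypothetical file name).
from pathlib import Path

PACKAGE_DIRS = ["ui", "controllers", "utils"]  # assumed layout next to main.py

for pkg in PACKAGE_DIRS:
    pkg_dir = Path(pkg)
    if not pkg_dir.is_dir():
        print(f"skipping {pkg}: directory not found")
        continue
    init_file = pkg_dir / "__init__.py"
    if init_file.exists():
        print(f"{init_file} already present")
    else:
        init_file.write_text(f"# {pkg} package for Traffic Monitoring System\n")
        print(f"created {init_file}")
```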
189
qt_app_pyside1/build_exe.py
Normal file
189
qt_app_pyside1/build_exe.py
Normal file
@@ -0,0 +1,189 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Comprehensive build script for Traffic Monitor application
|
||||
This script handles the complete build process with all necessary PyInstaller flags
|
||||
"""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
def run_command(command, description):
|
||||
"""Run a command and handle errors"""
|
||||
print(f"\n🔧 {description}")
|
||||
print(f"Running: {command}")
|
||||
|
||||
try:
|
||||
result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
|
||||
print("✅ Success!")
|
||||
if result.stdout:
|
||||
print(result.stdout)
|
||||
return True
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"❌ Error: {e}")
|
||||
if e.stdout:
|
||||
print("STDOUT:", e.stdout)
|
||||
if e.stderr:
|
||||
print("STDERR:", e.stderr)
|
||||
return False
|
||||
|
||||
def build_application():
|
||||
"""Build the application with PyInstaller"""
|
||||
|
||||
# Get current directory
|
||||
current_dir = Path.cwd()
|
||||
print(f"Building from: {current_dir}")
|
||||
|
||||
# Clean previous builds
|
||||
print("\n🧹 Cleaning previous builds...")
|
||||
for folder in ['build', 'dist']:
|
||||
if os.path.exists(folder):
|
||||
shutil.rmtree(folder)
|
||||
print(f"Removed {folder}")
|
||||
|
||||
if os.path.exists('TrafficMonitor.spec'):
|
||||
os.remove('TrafficMonitor.spec')
|
||||
print("Removed old spec file")
|
||||
|
||||
# Define PyInstaller command with all necessary flags
|
||||
pyinstaller_cmd = [
|
||||
'pyinstaller',
|
||||
'--name=TrafficMonitor',
|
||||
'--windowed', # Remove for debugging
|
||||
'--onefile',
|
||||
'--icon=resources/icon.ico' if os.path.exists('resources/icon.ico') else '',
|
||||
|
||||
# Add data files and folders
|
||||
'--add-data=ui;ui',
|
||||
'--add-data=controllers;controllers',
|
||||
'--add-data=utils;utils',
|
||||
'--add-data=openvino_models;openvino_models',
|
||||
'--add-data=resources;resources' if os.path.exists('resources') else '',
|
||||
'--add-data=config.json;.',
|
||||
'--add-data=splash.py;.',
|
||||
|
||||
# Hidden imports for modules PyInstaller might miss
|
||||
'--hidden-import=cv2',
|
||||
'--hidden-import=openvino',
|
||||
'--hidden-import=numpy',
|
||||
'--hidden-import=PySide6.QtCore',
|
||||
'--hidden-import=PySide6.QtWidgets',
|
||||
'--hidden-import=PySide6.QtGui',
|
||||
'--hidden-import=json',
|
||||
'--hidden-import=os',
|
||||
'--hidden-import=sys',
|
||||
'--hidden-import=time',
|
||||
'--hidden-import=traceback',
|
||||
'--hidden-import=pathlib',
|
||||
|
||||
# Main script
|
||||
'main.py'
|
||||
]
|
||||
|
||||
# Drop empty arguments (the icon/resources flags above evaluate to '' when the files are absent)
pyinstaller_cmd = [arg for arg in pyinstaller_cmd if arg]
|
||||
|
||||
# Convert to string command
|
||||
cmd_str = ' '.join(f'"{arg}"' if ' ' in arg else arg for arg in pyinstaller_cmd)
|
||||
|
||||
# Build the application
|
||||
if run_command(cmd_str, "Building Traffic Monitor application"):
|
||||
print(f"\n✅ Build completed successfully!")
|
||||
print(f"Executable location: {current_dir}/dist/TrafficMonitor.exe")
|
||||
return True
|
||||
else:
|
||||
print(f"\n❌ Build failed!")
|
||||
return False
|
||||
|
||||
def build_debug_version():
|
||||
"""Build a debug version with console output"""
|
||||
|
||||
print("\n🔧 Building debug version...")
|
||||
|
||||
# Define PyInstaller command for debug build
|
||||
pyinstaller_cmd = [
|
||||
'pyinstaller',
|
||||
'--name=TrafficMonitorDebug',
|
||||
'--console', # Enable console for debugging
|
||||
'--onefile',
|
||||
|
||||
# Add data files and folders
|
||||
'--add-data=ui;ui',
|
||||
'--add-data=controllers;controllers',
|
||||
'--add-data=utils;utils',
|
||||
'--add-data=openvino_models;openvino_models',
|
||||
'--add-data=resources;resources' if os.path.exists('resources') else '',
|
||||
'--add-data=config.json;.',
|
||||
'--add-data=splash.py;.',
|
||||
|
||||
# Hidden imports
|
||||
'--hidden-import=cv2',
|
||||
'--hidden-import=openvino',
|
||||
'--hidden-import=numpy',
|
||||
'--hidden-import=PySide6.QtCore',
|
||||
'--hidden-import=PySide6.QtWidgets',
|
||||
'--hidden-import=PySide6.QtGui',
|
||||
'--hidden-import=json',
|
||||
'--hidden-import=os',
|
||||
'--hidden-import=sys',
|
||||
'--hidden-import=time',
|
||||
'--hidden-import=traceback',
|
||||
'--hidden-import=pathlib',
|
||||
|
||||
# Main script
|
||||
'main.py'
|
||||
]
|
||||
|
||||
# Convert to string command
|
||||
cmd_str = ' '.join(f'"{arg}"' if ' ' in arg else arg for arg in pyinstaller_cmd)
|
||||
|
||||
return run_command(cmd_str, "Building debug version")
|
||||
|
||||
def main():
|
||||
"""Main build process"""
|
||||
print("🚀 Traffic Monitor Build Script")
|
||||
print("=" * 50)
|
||||
|
||||
# Check if PyInstaller is available
|
||||
try:
|
||||
subprocess.run(['pyinstaller', '--version'], check=True, capture_output=True)
|
||||
except (subprocess.CalledProcessError, FileNotFoundError):
|
||||
print("❌ PyInstaller not found. Installing...")
|
||||
if not run_command('pip install pyinstaller', "Installing PyInstaller"):
|
||||
print("Failed to install PyInstaller")
|
||||
return False
|
||||
|
||||
# Check for required files
|
||||
required_files = ['main.py', 'ui', 'controllers', 'utils', 'config.json']
|
||||
missing_files = [f for f in required_files if not os.path.exists(f)]
|
||||
|
||||
if missing_files:
|
||||
print(f"❌ Missing required files/folders: {missing_files}")
|
||||
return False
|
||||
|
||||
print("✅ All required files found")
|
||||
|
||||
# Build debug version first
|
||||
if build_debug_version():
|
||||
print("\n✅ Debug build completed!")
|
||||
print(f"Debug executable: {Path.cwd()}/dist/TrafficMonitorDebug.exe")
|
||||
|
||||
# Build main application
|
||||
if build_application():
|
||||
print(f"\n🎉 All builds completed successfully!")
|
||||
print(f"Main executable: {Path.cwd()}/dist/TrafficMonitor.exe")
|
||||
print(f"Debug executable: {Path.cwd()}/dist/TrafficMonitorDebug.exe")
|
||||
|
||||
print(f"\n📝 To test:")
|
||||
print(f"1. Run debug version first: dist\\TrafficMonitorDebug.exe")
|
||||
print(f"2. If working, run main version: dist\\TrafficMonitor.exe")
|
||||
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
if __name__ == "__main__":
|
||||
success = main()
|
||||
sys.exit(0 if success else 1)
|
||||
203
qt_app_pyside1/build_exe_optimized.py
Normal file
203
qt_app_pyside1/build_exe_optimized.py
Normal file
@@ -0,0 +1,203 @@
|
||||
"""
|
||||
OPTIMIZED PYINSTALLER BUILD SCRIPT v2.0
|
||||
========================================
|
||||
This script addresses all critical errors and warnings from the build log:
|
||||
|
||||
Critical Fixes:
|
||||
1. Missing __init__.py files (fixed by creating them)
|
||||
2. Missing hidden imports (cv2, json modules)
|
||||
3. Correct data file inclusion
|
||||
4. Platform-specific optimizations
|
||||
|
||||
Usage: python build_exe_optimized.py
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
def clean_build_artifacts():
|
||||
"""Clean previous build artifacts"""
|
||||
print("🧹 Cleaning previous build artifacts...")
|
||||
|
||||
# Expand the '*.spec' pattern explicitly; os.path.exists() does not glob
artifacts = ['build', 'dist'] + [f for f in os.listdir('.') if f.endswith('.spec')]
for artifact in artifacts:
if os.path.exists(artifact):
if os.path.isdir(artifact):
shutil.rmtree(artifact)
print(f"   Removed directory: {artifact}")
else:
os.remove(artifact)
print(f"   Removed file: {artifact}")
|
||||
|
||||
def verify_dependencies():
|
||||
"""Verify all required packages are installed"""
|
||||
print("📦 Verifying dependencies...")
|
||||
|
||||
required_packages = [
'PySide6', 'opencv-python', 'numpy', 'openvino',
'ultralytics', 'matplotlib', 'pillow', 'requests'
]
# Distribution names whose importable module name differs from the pip name
import_names = {'opencv-python': 'cv2', 'pillow': 'PIL'}

missing_packages = []
for package in required_packages:
try:
__import__(import_names.get(package, package))
|
||||
print(f" ✓ {package}")
|
||||
except ImportError:
|
||||
missing_packages.append(package)
|
||||
print(f" ✗ {package} - MISSING")
|
||||
|
||||
if missing_packages:
|
||||
print(f"\n❌ Missing packages: {', '.join(missing_packages)}")
|
||||
print(" Install with: pip install " + " ".join(missing_packages))
|
||||
return False
|
||||
|
||||
print(" ✓ All dependencies verified")
|
||||
return True
|
||||
|
||||
def build_executable():
|
||||
"""Build the executable with optimized PyInstaller command"""
|
||||
print("🔨 Building executable...")
|
||||
|
||||
# Core PyInstaller command with ALL critical fixes
|
||||
cmd = [
|
||||
'pyinstaller',
|
||||
'--name=TrafficMonitoringApp',
|
||||
'--onefile', # Single executable
|
||||
'--windowed', # No console window
|
||||
'--icon=resources/app_icon.ico' if os.path.exists('resources/app_icon.ico') else '',
|
||||
|
||||
# === CRITICAL HIDDEN IMPORTS (Fixes Build Errors) ===
|
||||
'--hidden-import=cv2',
|
||||
'--hidden-import=cv2.cv2',
|
||||
'--hidden-import=numpy',
|
||||
'--hidden-import=numpy.core',
|
||||
'--hidden-import=openvino',
|
||||
'--hidden-import=openvino.runtime',
|
||||
'--hidden-import=ultralytics',
|
||||
'--hidden-import=ultralytics.engine',
|
||||
'--hidden-import=PySide6.QtCore',
|
||||
'--hidden-import=PySide6.QtWidgets',
|
||||
'--hidden-import=PySide6.QtGui',
|
||||
'--hidden-import=json',
|
||||
'--hidden-import=pathlib',
|
||||
'--hidden-import=threading',
|
||||
'--hidden-import=queue',
|
||||
|
||||
# === UI/CONTROLLER MODULES ===
|
||||
'--hidden-import=ui',
|
||||
'--hidden-import=ui.main_window',
|
||||
'--hidden-import=ui.main_window1',
|
||||
'--hidden-import=controllers',
|
||||
'--hidden-import=controllers.video_controller',
|
||||
'--hidden-import=utils',
|
||||
'--hidden-import=utils.detection_utils',
|
||||
'--hidden-import=utils.tracking_utils',
|
||||
'--hidden-import=utils.crosswalk_utils_advanced',
|
||||
'--hidden-import=utils.traffic_light_utils',
|
||||
|
||||
# === EXCLUDE HEAVY/UNUSED MODULES (Reduces Size) ===
|
||||
'--exclude-module=matplotlib.backends._backend_pdf',
|
||||
'--exclude-module=matplotlib.backends._backend_ps',
|
||||
'--exclude-module=matplotlib.backends._backend_svg',
|
||||
'--exclude-module=tkinter',
|
||||
'--exclude-module=PyQt5',
|
||||
'--exclude-module=unittest',
|
||||
'--exclude-module=test',
|
||||
'--exclude-module=distutils',
|
||||
|
||||
# === DATA FILES AND DIRECTORIES ===
|
||||
'--add-data=config.json;.',
|
||||
'--add-data=resources;resources',
|
||||
'--add-data=openvino_models;openvino_models',
|
||||
'--add-data=ui;ui',
|
||||
'--add-data=controllers;controllers',
|
||||
'--add-data=utils;utils',
|
||||
|
||||
# === SPLASH SCREEN ===
|
||||
'--splash=resources/splash.png' if os.path.exists('resources/splash.png') else '',
|
||||
|
||||
# === MAIN SCRIPT ===
|
||||
'main.py'
|
||||
]
|
||||
|
||||
# Remove empty strings from command
|
||||
cmd = [arg for arg in cmd if arg]
|
||||
|
||||
print("📋 PyInstaller command:")
|
||||
print(" " + " ".join(cmd))
|
||||
print()
|
||||
|
||||
try:
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
print("✅ Build completed successfully!")
|
||||
print(f"📁 Executable location: dist/TrafficMonitoringApp.exe")
|
||||
return True
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
print("❌ Build failed!")
|
||||
print("STDOUT:", e.stdout)
|
||||
print("STDERR:", e.stderr)
|
||||
return False
|
||||
|
||||
def post_build_verification():
|
||||
"""Verify the built executable"""
|
||||
print("🔍 Post-build verification...")
|
||||
|
||||
exe_path = Path('dist/TrafficMonitoringApp.exe')
|
||||
if exe_path.exists():
|
||||
size_mb = exe_path.stat().st_size / (1024 * 1024)
|
||||
print(f" ✓ Executable created: {size_mb:.1f} MB")
|
||||
|
||||
# Check if critical files are bundled
|
||||
print(" 📋 Bundled resources check:")
|
||||
print(" - config.json: Expected in executable")
|
||||
print(" - openvino_models/: Expected in executable")
|
||||
print(" - resources/: Expected in executable")
|
||||
|
||||
return True
|
||||
else:
|
||||
print(" ❌ Executable not found!")
|
||||
return False
|
||||
|
||||
def main():
|
||||
"""Main build process"""
|
||||
print("🚀 TRAFFIC MONITORING APP - OPTIMIZED BUILD")
|
||||
print("=" * 50)
|
||||
|
||||
# Step 1: Clean artifacts
|
||||
clean_build_artifacts()
|
||||
print()
|
||||
|
||||
# Step 2: Verify dependencies
|
||||
if not verify_dependencies():
|
||||
print("\n❌ Build aborted due to missing dependencies")
|
||||
sys.exit(1)
|
||||
print()
|
||||
|
||||
# Step 3: Build executable
|
||||
if not build_executable():
|
||||
print("\n❌ Build failed")
|
||||
sys.exit(1)
|
||||
print()
|
||||
|
||||
# Step 4: Post-build verification
|
||||
if not post_build_verification():
|
||||
print("\n⚠️ Build completed but verification failed")
|
||||
sys.exit(1)
|
||||
|
||||
print("\n🎉 BUILD SUCCESSFUL!")
|
||||
print("=" * 50)
|
||||
print("📁 Executable: dist/TrafficMonitoringApp.exe")
|
||||
print("🏃 To run: dist\\TrafficMonitoringApp.exe")
|
||||
print("\n💡 Next steps:")
|
||||
print(" 1. Test the executable in a clean environment")
|
||||
print(" 2. Verify all UI elements load correctly")
|
||||
print(" 3. Test video processing and violation detection")
|
||||
print(" 4. Check configuration file loading")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
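One thing neither build script covers is how the application finds the bundled data at runtime. In a `--onefile` build the files passed via `--add-data` are unpacked under the temporary directory PyInstaller publishes as `sys._MEIPASS`, so paths should be resolved through it rather than relative to the executable. A minimal sketch follows; the `resource_path` helper is an assumption, not code from this repository.

```python
# Minimal sketch of resolving bundled data files at runtime. Falls back to the
# source tree during development, where sys._MEIPASS does not exist.
import sys
from pathlib import Path

def resource_path(relative: str) -> Path:
    """Return an absolute path to a bundled resource such as config.json."""
    base = Path(getattr(sys, "_MEIPASS", Path(__file__).resolve().parent))
    return base / relative

# Example usage:
# config_file = resource_path("config.json")
# model_dir = resource_path("openvino_models")
```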
33
qt_app_pyside1/config.json
Normal file
33
qt_app_pyside1/config.json
Normal file
@@ -0,0 +1,33 @@
|
||||
{
  "detection": {
    "confidence_threshold": 0.5,
    "enable_ocr": true,
    "enable_tracking": true,
    "model_path": null,
    "device": "CPU"
  },
  "violations": {
    "red_light_grace_period": 2.0,
    "stop_sign_duration": 2.0,
    "speed_tolerance": 5,
    "enable_lane": true,
    "enable_red_light": true,
    "enable_speed": true,
    "enable_stop_sign": true
  },
  "display": {
    "max_display_width": 800,
    "show_confidence": true,
    "show_labels": true,
    "show_license_plates": true,
    "show_performance": true
  },
  "performance": {
    "max_history_frames": 1000,
    "cleanup_interval": 3600
  },
  "analytics": {
    "enable_charts": true,
    "history_length": 1000
  }
}
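For reference, a minimal sketch of reading this file with a fallback when it is absent (for example in a packaged build before first run); the `load_config` helper and the defaults shown are assumptions, not code taken from the application.

```python
# Minimal sketch of loading config.json with a safe fallback.
import json
from pathlib import Path

DEFAULTS = {"detection": {"confidence_threshold": 0.5, "device": "CPU"}}  # assumed defaults

def load_config(path: str = "config.json") -> dict:
    """Return the parsed configuration, or DEFAULTS if the file is missing."""
    config_file = Path(path)
    if not config_file.exists():
        return DEFAULTS
    with config_file.open("r", encoding="utf-8") as f:
        return json.load(f)

# Example: threshold = load_config()["detection"]["confidence_threshold"]
```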
1
qt_app_pyside1/controllers/__init__.py
Normal file
1
qt_app_pyside1/controllers/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Controllers package for Traffic Monitoring System
|
||||
BIN
qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc
Normal file
BIN
qt_app_pyside1/controllers/__pycache__/__init__.cpython-311.pyc
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
341
qt_app_pyside1/controllers/analytics_controller.py
Normal file
341
qt_app_pyside1/controllers/analytics_controller.py
Normal file
@@ -0,0 +1,341 @@
|
||||
from PySide6.QtCore import QObject, Signal, Slot
|
||||
import numpy as np
|
||||
from collections import defaultdict, deque
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, List, Any
|
||||
|
||||
class AnalyticsController(QObject):
|
||||
"""
|
||||
Controller for traffic analytics and statistics.
|
||||
|
||||
Manages:
|
||||
- Vehicle counts by class
|
||||
- Violation statistics
|
||||
- Temporal analytics (traffic over time)
|
||||
- Speed statistics
|
||||
"""
|
||||
analytics_updated = Signal(dict) # Emitted when analytics are updated
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the analytics controller"""
|
||||
super().__init__()
|
||||
|
||||
# Detection statistics
|
||||
self.detection_counts = defaultdict(int)
|
||||
self.detection_history = []
|
||||
|
||||
# Violation statistics
|
||||
self.violation_counts = defaultdict(int)
|
||||
self.violation_history = []
|
||||
|
||||
# Time series data (for charts)
|
||||
self.time_series = {
|
||||
'timestamps': [],
|
||||
'vehicle_counts': [],
|
||||
'pedestrian_counts': [],
|
||||
'violation_counts': []
|
||||
}
|
||||
|
||||
# Performance metrics
|
||||
self.fps_history = deque(maxlen=100)
|
||||
self.processing_times = deque(maxlen=100)
|
||||
|
||||
# Aggregated metrics
|
||||
self.aggregated_metrics = {
|
||||
'total_vehicles': 0,
|
||||
'total_pedestrians': 0,
|
||||
'total_violations': 0,
|
||||
'avg_processing_time': 0,
|
||||
'avg_fps': 0,
|
||||
'peak_vehicle_count': 0,
|
||||
'peak_violation_hour': None
|
||||
}
|
||||
|
||||
# Initialize current time window
|
||||
self.current_window = datetime.now().replace(
|
||||
minute=0, second=0, microsecond=0
|
||||
)
|
||||
self.window_stats = defaultdict(int)
|
||||
|
||||
# Add traffic light analytics
|
||||
self.traffic_light_counts = defaultdict(int) # Counts by color
|
||||
self.traffic_light_color_series = [] # List of (timestamp, color)
|
||||
self.traffic_light_color_numeric = [] # For charting: 0=unknown, 1=red, 2=yellow, 3=green
|
||||
self.traffic_light_color_map = {'unknown': 0, 'red': 1, 'yellow': 2, 'green': 3}
|
||||
|
||||
self._last_update = time.time()
|
||||
@Slot(object, list, float)
|
||||
def process_frame_data(self, frame, detections, metrics):
|
||||
"""
|
||||
Process frame data for analytics.
|
||||
|
||||
Args:
|
||||
frame: Video frame
|
||||
detections: List of detections
|
||||
metrics: Dictionary containing metrics like 'detection_fps' or directly the fps value
|
||||
"""
|
||||
try:
|
||||
# Empty violations list since violation detection is disabled
|
||||
violations = []
|
||||
|
||||
# Debug info
|
||||
det_count = len(detections) if detections else 0
|
||||
print(f"Analytics processing: {det_count} detections")
|
||||
except Exception as e:
|
||||
print(f"Error in process_frame_data initialization: {e}")
|
||||
violations = []
|
||||
# Update FPS history - safely handle different metrics formats
|
||||
try:
|
||||
if isinstance(metrics, dict):
|
||||
fps = metrics.get('detection_fps', None)
|
||||
if isinstance(fps, (int, float)):
|
||||
self.fps_history.append(fps)
|
||||
elif isinstance(metrics, (int, float)):
|
||||
# Handle case where metrics is directly the fps value
|
||||
self.fps_history.append(metrics)
|
||||
else:
|
||||
# Fallback if metrics is neither dict nor numeric
|
||||
print(f"Warning: Unexpected metrics type: {type(metrics)}")
|
||||
except Exception as e:
|
||||
print(f"Error processing metrics: {e}")
|
||||
# Add a default value to keep analytics running
|
||||
self.fps_history.append(0.0)
|
||||
|
||||
# Process detections
|
||||
vehicle_count = 0
|
||||
pedestrian_count = 0
|
||||
|
||||
# --- Traffic light analytics ---
|
||||
traffic_light_count = 0
|
||||
traffic_light_colors = []
|
||||
for det in (detections or []):  # tolerate detections=None without crashing
|
||||
class_name = det.get('class_name', 'unknown').lower()
|
||||
self.detection_counts[class_name] += 1
|
||||
|
||||
# Track vehicles vs pedestrians
|
||||
if class_name in ['car', 'truck', 'bus', 'motorcycle']:
|
||||
vehicle_count += 1
|
||||
elif class_name == 'person':
|
||||
pedestrian_count += 1
|
||||
if class_name in ['traffic light', 'trafficlight', 'tl', 'signal']:
|
||||
traffic_light_count += 1
|
||||
color = det.get('traffic_light_color', {}).get('color', 'unknown')
|
||||
self.traffic_light_counts[color] += 1
|
||||
traffic_light_colors.append(color)
|
||||
# Track most common color for this frame
|
||||
if traffic_light_colors:
|
||||
from collections import Counter
|
||||
most_common_color = Counter(traffic_light_colors).most_common(1)[0][0]
|
||||
else:
|
||||
most_common_color = 'unknown'
|
||||
now_dt = datetime.now()
|
||||
self.traffic_light_color_series.append((now_dt.strftime('%H:%M:%S'), most_common_color))
|
||||
self.traffic_light_color_numeric.append(self.traffic_light_color_map.get(most_common_color, 0))
|
||||
# Keep last 60 points
|
||||
if len(self.traffic_light_color_series) > 60:
|
||||
self.traffic_light_color_series = self.traffic_light_color_series[-60:]
|
||||
self.traffic_light_color_numeric = self.traffic_light_color_numeric[-60:]
|
||||
|
||||
# Update time series data (once per second)
|
||||
now = time.time()
|
||||
if now - self._last_update >= 1.0:
|
||||
self._update_time_series(vehicle_count, pedestrian_count, len(violations), most_common_color)
|
||||
self._last_update = now
|
||||
|
||||
# Update aggregated metrics
|
||||
self._update_aggregated_metrics()
|
||||
|
||||
# Emit updated analytics
|
||||
self.analytics_updated.emit(self.get_analytics())
|
||||
|
||||
def _update_time_series(self, vehicle_count, pedestrian_count, violation_count, traffic_light_color=None):
|
||||
"""Update time series data for charts"""
|
||||
now = datetime.now()
|
||||
|
||||
# Check if we've moved to a new hour
|
||||
if now.hour != self.current_window.hour or now.day != self.current_window.day:
|
||||
# Save current window stats
|
||||
self._save_window_stats()
|
||||
|
||||
# Reset for new window
|
||||
self.current_window = now.replace(minute=0, second=0, microsecond=0)
|
||||
self.window_stats = defaultdict(int)
|
||||
# Add current counts to window
|
||||
self.window_stats['vehicles'] += vehicle_count
|
||||
self.window_stats['pedestrians'] += pedestrian_count
|
||||
self.window_stats['violations'] += violation_count
|
||||
|
||||
# Add to time series
|
||||
self.time_series['timestamps'].append(now.strftime('%H:%M:%S'))
|
||||
self.time_series['vehicle_counts'].append(vehicle_count)
|
||||
self.time_series['pedestrian_counts'].append(pedestrian_count)
|
||||
self.time_series['violation_counts'].append(violation_count)
|
||||
|
||||
# Add traffic light color to time series
|
||||
if traffic_light_color is not None:
|
||||
if 'traffic_light_colors' not in self.time_series:
|
||||
self.time_series['traffic_light_colors'] = []
|
||||
self.time_series['traffic_light_colors'].append(traffic_light_color)
|
||||
if len(self.time_series['traffic_light_colors']) > 60:
|
||||
self.time_series['traffic_light_colors'] = self.time_series['traffic_light_colors'][-60:]
|
||||
|
||||
# Keep last 60 data points (1 minute at 1 Hz)
|
||||
if len(self.time_series['timestamps']) > 60:
|
||||
for key in self.time_series:
|
||||
self.time_series[key] = self.time_series[key][-60:]
|
||||
|
||||
def _save_window_stats(self):
|
||||
"""Save stats for the current time window"""
|
||||
if sum(self.window_stats.values()) > 0:
|
||||
window_info = {
|
||||
'time': self.current_window,
|
||||
'vehicles': self.window_stats['vehicles'],
|
||||
'pedestrians': self.window_stats['pedestrians'],
|
||||
'violations': self.window_stats['violations']
|
||||
}
|
||||
|
||||
# Update peak stats
|
||||
if window_info['vehicles'] > self.aggregated_metrics['peak_vehicle_count']:
|
||||
self.aggregated_metrics['peak_vehicle_count'] = window_info['vehicles']
|
||||
|
||||
if window_info['violations'] > 0:
|
||||
if self.aggregated_metrics['peak_violation_hour'] is None or \
|
||||
window_info['violations'] > self.aggregated_metrics['peak_violation_hour']['violations']:
|
||||
self.aggregated_metrics['peak_violation_hour'] = {
|
||||
'time': self.current_window.strftime('%H:%M'),
|
||||
'violations': window_info['violations']
|
||||
}
|
||||
|
||||
def _update_aggregated_metrics(self):
|
||||
"""Update aggregated analytics metrics"""
|
||||
# Count totals
|
||||
self.aggregated_metrics['total_vehicles'] = sum([
|
||||
self.detection_counts[c] for c in
|
||||
['car', 'truck', 'bus', 'motorcycle']
|
||||
])
|
||||
self.aggregated_metrics['total_pedestrians'] = self.detection_counts['person']
|
||||
self.aggregated_metrics['total_violations'] = sum(self.violation_counts.values())
|
||||
|
||||
# Average FPS
|
||||
if self.fps_history:
|
||||
# Only sum numbers, skip dicts
|
||||
numeric_fps = [f for f in self.fps_history if isinstance(f, (int, float))]
|
||||
if numeric_fps:
|
||||
self.aggregated_metrics['avg_fps'] = sum(numeric_fps) / len(numeric_fps)
|
||||
else:
|
||||
self.aggregated_metrics['avg_fps'] = 0.0
|
||||
|
||||
# Average processing time
|
||||
if self.processing_times:
|
||||
self.aggregated_metrics['avg_processing_time'] = sum(self.processing_times) / len(self.processing_times)
|
||||
|
||||
def get_analytics(self) -> Dict:
|
||||
"""
|
||||
Get current analytics data.
|
||||
|
||||
Returns:
|
||||
Dictionary of analytics data
|
||||
"""
|
||||
return {
|
||||
'detection_counts': dict(self.detection_counts),
|
||||
'violation_counts': dict(self.violation_counts),
|
||||
'time_series': self.time_series,
|
||||
'metrics': self.aggregated_metrics,
|
||||
'recent_violations': self.violation_history[-10:] if self.violation_history else [],
|
||||
'traffic_light_counts': dict(self.traffic_light_counts),
|
||||
'traffic_light_color_series': self.traffic_light_color_series,
|
||||
'traffic_light_color_numeric': self.traffic_light_color_numeric
|
||||
}
|
||||
|
||||
def get_violation_history(self) -> List:
|
||||
"""
|
||||
Get violation history.
|
||||
|
||||
Returns:
|
||||
List of violation events
|
||||
"""
|
||||
return self.violation_history.copy()
|
||||
|
||||
def clear_statistics(self):
|
||||
"""Reset all statistics"""
|
||||
self.detection_counts = defaultdict(int)
|
||||
self.violation_counts = defaultdict(int)
|
||||
self.detection_history = []
|
||||
self.violation_history = []
|
||||
self.time_series = {
|
||||
'timestamps': [],
|
||||
'vehicle_counts': [],
|
||||
'pedestrian_counts': [],
|
||||
'violation_counts': []
|
||||
}
|
||||
self.fps_history.clear()
|
||||
self.processing_times.clear()
|
||||
self.window_stats = defaultdict(int)
|
||||
self.aggregated_metrics = {
|
||||
'total_vehicles': 0,
|
||||
'total_pedestrians': 0,
|
||||
'total_violations': 0,
|
||||
'avg_processing_time': 0,
|
||||
'avg_fps': 0,
|
||||
'peak_vehicle_count': 0,
|
||||
'peak_violation_hour': None
|
||||
}
|
||||
|
||||
def register_violation(self, violation):
|
||||
"""
|
||||
Register a new violation in the analytics.
|
||||
|
||||
Args:
|
||||
violation: Dictionary with violation information
|
||||
"""
|
||||
try:
|
||||
# Add to violation counts - check both 'violation' and 'violation_type' keys
|
||||
violation_type = violation.get('violation_type') or violation.get('violation', 'unknown')
|
||||
self.violation_counts[violation_type] += 1
|
||||
|
||||
# Add to violation history
|
||||
self.violation_history.append(violation)
|
||||
|
||||
# Update time series
|
||||
now = datetime.now()
|
||||
self.time_series['timestamps'].append(now.strftime('%H:%M:%S'))  # same string format as _update_time_series
|
||||
|
||||
# If we've been running for a while, we might need to drop old timestamps
|
||||
if len(self.time_series['timestamps']) > 100: # Keep last 100 points
|
||||
self.time_series['timestamps'] = self.time_series['timestamps'][-100:]
|
||||
self.time_series['vehicle_counts'] = self.time_series['vehicle_counts'][-100:]
|
||||
self.time_series['pedestrian_counts'] = self.time_series['pedestrian_counts'][-100:]
|
||||
self.time_series['violation_counts'] = self.time_series['violation_counts'][-100:]
|
||||
|
||||
# Append current totals to time series
|
||||
self.time_series['violation_counts'].append(sum(self.violation_counts.values()))
|
||||
|
||||
# Make sure all time series have the same length
|
||||
while len(self.time_series['vehicle_counts']) < len(self.time_series['timestamps']):
|
||||
self.time_series['vehicle_counts'].append(sum(self.detection_counts.get(c, 0)
|
||||
for c in ['car', 'truck', 'bus', 'motorcycle']))
|
||||
|
||||
while len(self.time_series['pedestrian_counts']) < len(self.time_series['timestamps']):
|
||||
self.time_series['pedestrian_counts'].append(self.detection_counts.get('person', 0))
|
||||
|
||||
# Update aggregated metrics
|
||||
self.aggregated_metrics['total_violations'] = sum(self.violation_counts.values())
|
||||
|
||||
# Emit updated analytics
|
||||
self._emit_analytics_update()
|
||||
|
||||
print(f"📊 Registered violation in analytics: {violation_type}")
|
||||
except Exception as e:
|
||||
print(f"❌ Error registering violation in analytics: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
def _emit_analytics_update(self):
|
||||
"""Emit analytics update signal with current data"""
|
||||
try:
|
||||
self.analytics_updated.emit(self.get_analytics())
|
||||
except Exception as e:
|
||||
print(f"❌ Error emitting analytics update: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
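A short usage sketch for the controller above; it assumes the `controllers.analytics_controller` import path referenced by the build scripts, and the detection dictionaries are invented for illustration rather than real detector output.

```python
# Minimal usage sketch for AnalyticsController.
from controllers.analytics_controller import AnalyticsController

analytics = AnalyticsController()
analytics.analytics_updated.connect(
    lambda data: print("vehicles so far:", data["metrics"]["total_vehicles"]))

detections = [
    {"class_name": "car", "confidence": 0.91},
    {"class_name": "traffic light",
     "traffic_light_color": {"color": "red"},
     "confidence": 0.77},
]
analytics.process_frame_data(frame=None, detections=detections,
                             metrics={"detection_fps": 24.0})
```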
1085
qt_app_pyside1/controllers/bytetrack_demo.py
Normal file
1085
qt_app_pyside1/controllers/bytetrack_demo.py
Normal file
File diff suppressed because it is too large
550
qt_app_pyside1/controllers/bytetrack_tracker.py
Normal file
550
qt_app_pyside1/controllers/bytetrack_tracker.py
Normal file
@@ -0,0 +1,550 @@
|
||||
# ByteTrack implementation for vehicle tracking
|
||||
# Efficient and robust multi-object tracking that works exactly like DeepSORT
|
||||
import numpy as np
|
||||
import cv2
|
||||
import time
|
||||
from collections import defaultdict, deque
|
||||
from typing import List, Dict, Any, Tuple, Optional
|
||||
import torch
|
||||
|
||||
class Track:
|
||||
"""Track class for ByteTracker - Compatible with video_controller_new.py"""
|
||||
|
||||
def __init__(self, detection, track_id):
|
||||
"""Initialize a track from a detection
|
||||
|
||||
Args:
|
||||
detection: Detection array [x1, y1, x2, y2, score, class_id]
|
||||
track_id: Unique track ID
|
||||
"""
|
||||
self.track_id = track_id
|
||||
self.tlbr = detection[:4] # [x1, y1, x2, y2]
|
||||
self.score = detection[4] if len(detection) > 4 else 0.5
|
||||
self.class_id = int(detection[5]) if len(detection) > 5 else 0
|
||||
|
||||
self.time_since_update = 0
|
||||
self.hits = 1 # Number of times track was matched to a detection
|
||||
self.age = 1
|
||||
self.frame_id = 0 # Will be set by the tracker during update
|
||||
self.is_lost = False # Flag to indicate if track is lost
|
||||
self.state = 'Tentative' # Track state: Tentative, Confirmed, Deleted
|
||||
|
||||
# Store position history for movement tracking
|
||||
self.history = deque(maxlen=30)
|
||||
self.history.append(self.tlbr.copy())
|
||||
|
||||
# Simple velocity estimation
|
||||
self.velocity = np.array([0., 0.])
|
||||
|
||||
def predict(self):
|
||||
"""Predict the next state using simple motion model"""
|
||||
if len(self.history) >= 2:
|
||||
# Simple velocity estimation from last two positions
|
||||
curr_center = np.array([(self.tlbr[0] + self.tlbr[2])/2, (self.tlbr[1] + self.tlbr[3])/2])
|
||||
prev_tlbr = self.history[-2]
|
||||
prev_center = np.array([(prev_tlbr[0] + prev_tlbr[2])/2, (prev_tlbr[1] + prev_tlbr[3])/2])
|
||||
self.velocity = curr_center - prev_center
|
||||
|
||||
# Predict next position
|
||||
next_center = curr_center + self.velocity
|
||||
w, h = self.tlbr[2] - self.tlbr[0], self.tlbr[3] - self.tlbr[1]
|
||||
self.tlbr = np.array([next_center[0] - w/2, next_center[1] - h/2,
|
||||
next_center[0] + w/2, next_center[1] + h/2])
|
||||
|
||||
self.age += 1
|
||||
self.time_since_update += 1
|
||||
|
||||
def update(self, detection):
|
||||
"""Update track with new detection"""
|
||||
self.tlbr = detection[:4]
|
||||
self.score = detection[4] if len(detection) > 4 else self.score
|
||||
self.class_id = int(detection[5]) if len(detection) > 5 else self.class_id
|
||||
|
||||
self.hits += 1
|
||||
self.time_since_update = 0
|
||||
self.history.append(self.tlbr.copy())
|
||||
|
||||
# Update state to confirmed after enough hits
|
||||
if self.state == 'Tentative' and self.hits >= 3:
|
||||
self.state = 'Confirmed'
|
||||
|
||||
def mark_missed(self):
|
||||
"""Mark track as missed (no detection matched)"""
|
||||
self.time_since_update += 1
|
||||
if self.time_since_update > 1:
|
||||
self.is_lost = True
|
||||
|
||||
def is_confirmed(self):
|
||||
"""Check if track is confirmed (has enough hits)"""
|
||||
return self.state == 'Confirmed'
|
||||
|
||||
def to_dict(self):
|
||||
"""Convert track to dictionary format for video_controller_new.py"""
|
||||
return {
|
||||
'id': self.track_id,
|
||||
'bbox': [float(self.tlbr[0]), float(self.tlbr[1]), float(self.tlbr[2]), float(self.tlbr[3])],
|
||||
'confidence': float(self.score),
|
||||
'class_id': int(self.class_id)
|
||||
}
|
||||
|
||||
|
||||
class BYTETracker:
|
||||
"""
|
||||
ByteTrack tracker implementation
|
||||
Designed to work exactly like DeepSORT with video_controller_new.py
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
track_thresh=0.5,
|
||||
track_buffer=30,
|
||||
match_thresh=0.7,
|
||||
frame_rate=30,
|
||||
track_high_thresh=0.6,
|
||||
track_low_thresh=0.1
|
||||
):
|
||||
self.tracked_tracks = [] # Active tracks being tracked
|
||||
self.lost_tracks = [] # Lost tracks (temporarily out of view)
|
||||
self.removed_tracks = [] # Removed tracks (permanently lost)
|
||||
|
||||
self.frame_id = 0
|
||||
self.max_time_lost = int(frame_rate / 30.0 * track_buffer)
|
||||
|
||||
self.track_thresh = track_thresh # Threshold for high-confidence detections
|
||||
self.track_high_thresh = track_high_thresh # Higher threshold for first association
|
||||
self.track_low_thresh = track_low_thresh # Lower threshold for second association
|
||||
self.match_thresh = match_thresh # IOU match threshold
|
||||
|
||||
self.track_id_count = 0
|
||||
|
||||
print(f"[BYTETRACK] Initialized with: high_thresh={track_high_thresh}, " +
|
||||
f"low_thresh={track_low_thresh}, match_thresh={match_thresh}, max_time_lost={self.max_time_lost}")
|
||||
|
||||
def update(self, detections, frame=None):
|
||||
"""Update tracks with new detections
|
||||
|
||||
Args:
|
||||
detections: list of dicts with keys ['bbox', 'confidence', 'class_id', ...]
|
||||
frame: Optional BGR frame for debug visualization
|
||||
|
||||
Returns:
|
||||
list of dicts with keys ['id', 'bbox', 'confidence', 'class_id', ...]
|
||||
"""
|
||||
self.frame_id += 1
|
||||
|
||||
# Convert detections to internal format
|
||||
converted_detections = self._convert_detections(detections)
|
||||
|
||||
print(f"[BYTETRACK] Frame {self.frame_id}: Processing {len(converted_detections)} detections")
|
||||
print(f"[BYTETRACK] Current state: {len(self.tracked_tracks)} tracked, {len(self.lost_tracks)} lost")
|
||||
|
||||
# Handle empty detections case
|
||||
if len(converted_detections) == 0:
|
||||
print(f"[BYTETRACK] No valid detections in frame {self.frame_id}")
|
||||
# Move all tracked to lost and update
|
||||
for track in self.tracked_tracks:
|
||||
track.mark_missed()
|
||||
track.predict()
|
||||
if track.time_since_update <= self.max_time_lost:
|
||||
self.lost_tracks.append(track)
|
||||
else:
|
||||
self.removed_tracks.append(track)
|
||||
|
||||
# Update lost tracks
|
||||
updated_lost = []
|
||||
for track in self.lost_tracks:
|
||||
track.predict()
|
||||
if track.time_since_update <= self.max_time_lost:
|
||||
updated_lost.append(track)
|
||||
else:
|
||||
self.removed_tracks.append(track)
|
||||
|
||||
self.tracked_tracks = []
|
||||
self.lost_tracks = updated_lost
|
||||
return []
|
||||
|
||||
# Split detections into high and low confidence
|
||||
confidence_values = converted_detections[:, 4].astype(float)
|
||||
high_indices = confidence_values >= self.track_high_thresh
|
||||
low_indices = (confidence_values >= self.track_low_thresh) & (confidence_values < self.track_high_thresh)
|
||||
|
||||
high_detections = converted_detections[high_indices]
|
||||
low_detections = converted_detections[low_indices]
|
||||
|
||||
print(f"[BYTETRACK] Split into {len(high_detections)} high-conf and {len(low_detections)} low-conf detections")
|
||||
|
||||
# Predict all tracks
|
||||
for track in self.tracked_tracks + self.lost_tracks:
|
||||
track.predict()
|
||||
|
||||
# First association: high-confidence detections with tracked tracks
|
||||
matches1, unmatched_tracks1, unmatched_dets1 = self._associate(
|
||||
high_detections, self.tracked_tracks, self.match_thresh)
|
||||
|
||||
# Update matched tracks
|
||||
for match in matches1:
|
||||
track_idx, det_idx = match
|
||||
self.tracked_tracks[track_idx].update(high_detections[det_idx])
|
||||
self.tracked_tracks[track_idx].frame_id = self.frame_id
|
||||
|
||||
# Move unmatched tracks to lost
|
||||
unmatched_tracked_tracks = []
|
||||
for idx in unmatched_tracks1:
|
||||
track = self.tracked_tracks[idx]
|
||||
track.mark_missed()
|
||||
if track.time_since_update <= self.max_time_lost:
|
||||
self.lost_tracks.append(track)
|
||||
else:
|
||||
self.removed_tracks.append(track)
|
||||
|
||||
# Keep only matched tracks
|
||||
self.tracked_tracks = [self.tracked_tracks[i] for i in range(len(self.tracked_tracks)) if i not in unmatched_tracks1]
|
||||
|
||||
# Second association: remaining high-conf detections with lost tracks
|
||||
if len(unmatched_dets1) > 0 and len(self.lost_tracks) > 0:
|
||||
remaining_high_dets = high_detections[unmatched_dets1]
|
||||
matches2, unmatched_tracks2, unmatched_dets2 = self._associate(
|
||||
remaining_high_dets, self.lost_tracks, self.match_thresh)
|
||||
|
||||
# Reactivate matched lost tracks
|
||||
for match in matches2:
|
||||
track_idx, det_idx = match
|
||||
track = self.lost_tracks[track_idx]
|
||||
track.update(remaining_high_dets[det_idx])
|
||||
track.frame_id = self.frame_id
|
||||
track.is_lost = False
|
||||
self.tracked_tracks.append(track)
|
||||
|
||||
# Remove reactivated tracks from lost
|
||||
self.lost_tracks = [self.lost_tracks[i] for i in range(len(self.lost_tracks)) if i not in [m[0] for m in matches2]]
|
||||
|
||||
# Update unmatched detections indices
|
||||
final_unmatched_dets = [unmatched_dets1[i] for i in unmatched_dets2]
|
||||
else:
|
||||
final_unmatched_dets = unmatched_dets1
|
||||
|
||||
# Third association: low-confidence detections with remaining lost tracks
|
||||
if len(low_detections) > 0 and len(self.lost_tracks) > 0:
|
||||
matches3, unmatched_tracks3, unmatched_dets3 = self._associate(
|
||||
low_detections, self.lost_tracks, self.match_thresh)
|
||||
|
||||
# Reactivate matched lost tracks
|
||||
for match in matches3:
|
||||
track_idx, det_idx = match
|
||||
track = self.lost_tracks[track_idx]
|
||||
track.update(low_detections[det_idx])
|
||||
track.frame_id = self.frame_id
|
||||
track.is_lost = False
|
||||
self.tracked_tracks.append(track)
|
||||
|
||||
# Remove reactivated tracks from lost
|
||||
self.lost_tracks = [self.lost_tracks[i] for i in range(len(self.lost_tracks)) if i not in [m[0] for m in matches3]]
|
||||
|
||||
# Create new tracks for remaining unmatched high-confidence detections
|
||||
new_tracks_created = 0
|
||||
for det_idx in final_unmatched_dets:
|
||||
detection = high_detections[det_idx]
|
||||
if detection[4] >= self.track_thresh: # Only create tracks for high-confidence detections
|
||||
self.track_id_count += 1
|
||||
new_track = Track(detection, self.track_id_count)
|
||||
new_track.frame_id = self.frame_id
|
||||
self.tracked_tracks.append(new_track)
|
||||
new_tracks_created += 1
|
||||
|
||||
# Clean up lost tracks that have been lost too long
|
||||
updated_lost = []
|
||||
removed_count = 0
|
||||
for track in self.lost_tracks:
|
||||
if track.time_since_update <= self.max_time_lost:
|
||||
updated_lost.append(track)
|
||||
else:
|
||||
self.removed_tracks.append(track)
|
||||
removed_count += 1
|
||||
self.lost_tracks = updated_lost
|
||||
|
||||
print(f"[BYTETRACK] Matched {len(matches1)} tracks, created {new_tracks_created} new tracks, removed {removed_count} expired tracks")
|
||||
print(f"[BYTETRACK] Final state: {len(self.tracked_tracks)} tracked, {len(self.lost_tracks)} lost")
|
||||
|
||||
# Return confirmed tracks in dictionary format
|
||||
confirmed_tracks = []
|
||||
for track in self.tracked_tracks:
|
||||
if track.is_confirmed():
|
||||
confirmed_tracks.append(track.to_dict())
|
||||
|
||||
print(f"[BYTETRACK] Returning {len(confirmed_tracks)} confirmed tracks")
|
||||
return confirmed_tracks
|
||||
|
||||
def _convert_detections(self, detections):
|
||||
"""Convert detection format to numpy array"""
|
||||
if len(detections) == 0:
|
||||
return np.empty((0, 6))
|
||||
|
||||
converted = []
|
||||
for det in detections:
|
||||
bbox = det.get('bbox', [0, 0, 0, 0])
|
||||
conf = det.get('confidence', 0.0)
|
||||
class_id = det.get('class_id', 0)
|
||||
|
||||
# Ensure bbox is valid
|
||||
if len(bbox) == 4 and bbox[2] > bbox[0] and bbox[3] > bbox[1]:
|
||||
converted.append([float(bbox[0]), float(bbox[1]), float(bbox[2]), float(bbox[3]), float(conf), int(class_id)])
|
||||
|
||||
return np.array(converted) if converted else np.empty((0, 6))
|
||||
|
||||
def _associate(self, detections, tracks, iou_threshold):
|
||||
"""Associate detections with tracks using IoU"""
|
||||
if len(detections) == 0 or len(tracks) == 0:
|
||||
return [], list(range(len(tracks))), list(range(len(detections)))
|
||||
|
||||
# Calculate IoU matrix
|
||||
iou_matrix = self._calculate_iou_matrix(detections[:, :4], np.array([track.tlbr for track in tracks]))
|
||||
|
||||
# Use Hungarian algorithm (simplified greedy approach)
|
||||
matches, unmatched_tracks, unmatched_detections = self._linear_assignment(iou_matrix, iou_threshold)
|
||||
|
||||
return matches, unmatched_tracks, unmatched_detections
|
||||
|
||||
def _calculate_iou_matrix(self, detections, tracks):
|
||||
"""Calculate IoU matrix between detections and tracks"""
|
||||
if len(detections) == 0 or len(tracks) == 0:
|
||||
return np.empty((0, 0))
|
||||
|
||||
# Calculate areas
|
||||
det_areas = (detections[:, 2] - detections[:, 0]) * (detections[:, 3] - detections[:, 1])
|
||||
track_areas = (tracks[:, 2] - tracks[:, 0]) * (tracks[:, 3] - tracks[:, 1])
|
||||
|
||||
# Calculate intersections
|
||||
ious = np.zeros((len(detections), len(tracks)))
|
||||
for i, det in enumerate(detections):
|
||||
for j, track in enumerate(tracks):
|
||||
# Intersection coordinates
|
||||
x1 = max(det[0], track[0])
|
||||
y1 = max(det[1], track[1])
|
||||
x2 = min(det[2], track[2])
|
||||
y2 = min(det[3], track[3])
|
||||
|
||||
if x2 > x1 and y2 > y1:
|
||||
intersection = (x2 - x1) * (y2 - y1)
|
||||
union = det_areas[i] + track_areas[j] - intersection
|
||||
ious[i, j] = intersection / union if union > 0 else 0
|
||||
else:
|
||||
ious[i, j] = 0
|
||||
|
||||
return ious
|
||||
|
||||
def _linear_assignment(self, cost_matrix, threshold):
|
||||
"""Simple greedy assignment based on IoU threshold"""
|
||||
matches = []
|
||||
unmatched_tracks = list(range(cost_matrix.shape[1]))
|
||||
unmatched_detections = list(range(cost_matrix.shape[0]))
|
||||
|
||||
if cost_matrix.size == 0:
|
||||
return matches, unmatched_tracks, unmatched_detections
|
||||
|
||||
# Find matches above threshold
|
||||
for i in range(cost_matrix.shape[0]):
|
||||
for j in range(cost_matrix.shape[1]):
|
||||
if cost_matrix[i, j] >= threshold:
|
||||
if i in unmatched_detections and j in unmatched_tracks:
|
||||
matches.append([j, i]) # [track_idx, det_idx]
|
||||
unmatched_tracks.remove(j)
|
||||
unmatched_detections.remove(i)
|
||||
break
|
||||
|
||||
return matches, unmatched_tracks, unmatched_detections
|
||||
|
||||
|
||||
class ByteTrackVehicleTracker:
|
||||
"""
|
||||
ByteTrack-based vehicle tracker with exact same API as DeepSortVehicleTracker
|
||||
for drop-in replacement in video_controller_new.py
|
||||
"""
|
||||
_instance = None
|
||||
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls._instance is None:
|
||||
print("[BYTETRACK SINGLETON] Creating ByteTrackVehicleTracker instance")
|
||||
cls._instance = super(ByteTrackVehicleTracker, cls).__new__(cls)
|
||||
cls._instance._initialized = False
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
if getattr(self, '_initialized', False):
|
||||
return
|
||||
print("[BYTETRACK INIT] Initializing ByteTrack tracker")
|
||||
|
||||
# Parameters optimized for vehicle tracking in traffic scenes
|
||||
self.tracker = BYTETracker(
|
||||
track_thresh=0.4, # Minimum confidence to create new tracks
|
||||
track_buffer=30, # How many frames to keep lost tracks
|
||||
match_thresh=0.7, # IoU threshold for matching
|
||||
track_high_thresh=0.5, # High confidence threshold for first association
|
||||
track_low_thresh=0.2, # Low confidence threshold for second association
|
||||
frame_rate=30 # Expected frame rate
|
||||
)
|
||||
|
||||
self._initialized = True
|
||||
self.debug = True # Enable debug output
|
||||
|
||||
# Memory management
|
||||
self.max_removed_tracks = 100 # Limit removed tracks to prevent memory issues
|
||||
|
||||
def update(self, detections, frame=None):
|
||||
"""
|
||||
Update tracker with new detections - EXACT API as DeepSORT
|
||||
|
||||
Args:
|
||||
detections: list of dicts with keys ['bbox', 'confidence', 'class_id', ...]
|
||||
frame: BGR image (optional)
|
||||
|
||||
Returns:
|
||||
list of dicts with keys ['id', 'bbox', 'confidence', 'class_id', ...]
|
||||
"""
|
||||
try:
|
||||
# Input validation
|
||||
if not isinstance(detections, list):
|
||||
print(f"[BYTETRACK ERROR] Invalid detections format: {type(detections)}")
|
||||
return []
|
||||
|
||||
# Process detections
|
||||
valid_dets = []
|
||||
for i, det in enumerate(detections):
|
||||
if not isinstance(det, dict):
|
||||
continue
|
||||
|
||||
bbox = det.get('bbox')
|
||||
conf = det.get('confidence', 0.0)
|
||||
class_id = det.get('class_id', 0)
|
||||
|
||||
if bbox is not None and len(bbox) == 4:
|
||||
x1, y1, x2, y2 = map(float, bbox)
|
||||
conf = float(conf)
|
||||
class_id = int(class_id)
|
||||
|
||||
# Validate bbox dimensions
|
||||
if x2 > x1 and y2 > y1 and conf > 0.1:
|
||||
valid_dets.append({
|
||||
'bbox': [x1, y1, x2, y2],
|
||||
'confidence': conf,
|
||||
'class_id': class_id
|
||||
})
|
||||
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] Processing {len(valid_dets)} valid detections")
|
||||
|
||||
# Update tracker
|
||||
tracks = self.tracker.update(valid_dets, frame)
|
||||
|
||||
# Memory management - limit removed tracks
|
||||
if len(self.tracker.removed_tracks) > self.max_removed_tracks:
|
||||
self.tracker.removed_tracks = self.tracker.removed_tracks[-self.max_removed_tracks//2:]
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] Cleaned up removed tracks, keeping last {len(self.tracker.removed_tracks)}")
|
||||
|
||||
return tracks
|
||||
|
||||
except Exception as e:
|
||||
print(f"[BYTETRACK ERROR] Error updating tracker: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return []
|
||||
|
||||
def update_tracks(self, detections, frame=None):
|
||||
"""
|
||||
Update method for compatibility with DeepSORT interface used by model_manager.py
|
||||
|
||||
Args:
|
||||
detections: list of detection arrays in format [bbox_xywh, conf, class_name]
|
||||
frame: BGR image (optional)
|
||||
|
||||
Returns:
|
||||
list of track objects with DeepSORT-compatible interface including is_confirmed() method
|
||||
"""
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] update_tracks called with {len(detections)} detections")
|
||||
|
||||
# Convert from DeepSORT format to ByteTrack format
|
||||
converted_dets = []
|
||||
|
||||
for det in detections:
|
||||
try:
|
||||
# Handle different detection formats
|
||||
if isinstance(det, (list, tuple)) and len(det) >= 2:
|
||||
# DeepSORT format: [bbox_xywh, conf, class_name]
|
||||
bbox_xywh, conf = det[:2]
|
||||
class_name = det[2] if len(det) > 2 else 'vehicle'
|
||||
|
||||
# Convert [x, y, w, h] to [x1, y1, x2, y2] with type validation
|
||||
if isinstance(bbox_xywh, (list, tuple, np.ndarray)) and len(bbox_xywh) == 4:
|
||||
x, y, w, h = map(float, bbox_xywh)
|
||||
conf = float(conf)
|
||||
|
||||
converted_dets.append({
|
||||
'bbox': [x, y, x + w, y + h],
|
||||
'confidence': conf,
|
||||
'class_id': 0 # Default vehicle class
|
||||
})
|
||||
else:
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] Skipping invalid detection format: {det}")
|
||||
except Exception as e:
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] Error converting detection: {e}")
|
||||
|
||||
# Call the regular update method to get dictionary tracks
|
||||
dict_tracks = self.update(converted_dets, frame)
|
||||
|
||||
if self.debug:
|
||||
print(f"[BYTETRACK] Converting {len(dict_tracks)} dict tracks to DeepSORT-compatible objects")
|
||||
|
||||
# Create DeepSORT compatible track objects from dictionaries
|
||||
ds_tracks = []
|
||||
for track_data in dict_tracks:
|
||||
ds_track = ByteTrackOutput(track_data)
|
||||
ds_tracks.append(ds_track)
|
||||
|
||||
return ds_tracks
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Reset the tracker to clean state - starts track IDs from 1
|
||||
Call this when starting a new video or session
|
||||
"""
|
||||
print("[BYTETRACK] Resetting tracker state")
|
||||
if hasattr(self, 'tracker') and self.tracker is not None:
|
||||
# Reset the internal BYTETracker
|
||||
self.tracker.tracked_tracks = []
|
||||
self.tracker.lost_tracks = []
|
||||
self.tracker.removed_tracks = []
|
||||
self.tracker.frame_id = 0
|
||||
self.tracker.track_id_count = 0 # Reset ID counter to start from 1
|
||||
|
||||
print("[BYTETRACK] Reset complete - track IDs will start from 1")
|
||||
else:
|
||||
print("[BYTETRACK] Warning: Tracker not initialized, nothing to reset")
|
||||
|
||||
|
||||
class ByteTrackOutput:
|
||||
"""
|
||||
Adapter class to make ByteTrack output compatible with DeepSORT interface
|
||||
"""
|
||||
|
||||
def __init__(self, track_data):
|
||||
"""Initialize from ByteTrack track dictionary"""
|
||||
self.track_id = track_data.get('id', -1)
|
||||
self.det_index = track_data.get('det_index', -1)
|
||||
self.to_tlwh_ret = track_data.get('bbox', [0, 0, 0, 0]) # [x, y, w, h]
|
||||
self.bbox = track_data.get('bbox', [0, 0, 0, 0]) # Add bbox property
|
||||
self.confidence = track_data.get('confidence', 0.0)
|
||||
        self._is_confirmed = track_data.get('is_confirmed', True)  # exposed via is_confirmed() below
|
||||
# Store the original track data
|
||||
self._track_data = track_data
|
||||
|
||||
    def is_confirmed(self):
        """Return confirmation status (DeepSORT-compatible method interface)"""
        return self._is_confirmed

    def to_tlwh(self):
        """Return bounding box in [x, y, w, h] format"""
        return self.to_tlwh_ret
|
||||
|
||||
def __getattr__(self, name):
|
||||
"""Fallback to original track data"""
|
||||
if name in self._track_data:
|
||||
return self._track_data[name]
|
||||
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
|
||||
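
# --- Usage sketch (illustrative only) ----------------------------------------
# A minimal sketch of driving the DeepSORT-compatible wrapper above (assumed
# here to be ByteTrackVehicleTracker, the name model_manager.py imports).
# Detection values and the frame are hypothetical placeholders; in the app
# they come from the OpenVINO detector and the video controller.
if __name__ == "__main__":
    import numpy as np

    tracker = ByteTrackVehicleTracker()
    tracker.reset()  # start track IDs from 1 for a fresh session

    # DeepSORT-style detections: ([x, y, w, h], confidence, class_name)
    detections = [
        ([100.0, 120.0, 50.0, 80.0], 0.91, 'car'),
        ([300.0, 140.0, 60.0, 90.0], 0.78, 'truck'),
    ]
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy BGR frame

    for track in tracker.update_tracks(detections, frame=frame):
        if not track.is_confirmed():
            continue
        x, y, w, h = track.to_tlwh()
        print(f"track {track.track_id}: x={x:.0f} y={y:.0f} w={w:.0f} h={h:.0f}")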
103
qt_app_pyside1/controllers/deepsort_tracker.py
Normal file
@@ -0,0 +1,103 @@
|
||||
# DeepSORT integration for vehicle tracking
|
||||
# You need to install deep_sort_realtime: pip install deep_sort_realtime
|
||||
from deep_sort_realtime.deepsort_tracker import DeepSort
|
||||
|
||||
class DeepSortVehicleTracker:
|
||||
_instance = None
|
||||
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls._instance is None:
|
||||
print("[DEEPSORT SINGLETON] Creating DeepSortVehicleTracker instance")
|
||||
cls._instance = super(DeepSortVehicleTracker, cls).__new__(cls)
|
||||
cls._instance._initialized = False
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
if getattr(self, '_initialized', False):
|
||||
return
|
||||
print("[DEEPSORT INIT] Initializing DeepSort tracker (should only see this once)")
|
||||
# Use DeepSORT with better parameters to reduce duplicate IDs
|
||||
self.tracker = DeepSort(
|
||||
max_age=50, # Keep tracks longer to avoid re-creating IDs
|
||||
n_init=3, # Require 3 consecutive detections before confirming track
|
||||
nms_max_overlap=0.3, # Stricter NMS to avoid duplicate detections
|
||||
max_cosine_distance=0.4, # Stricter appearance matching
|
||||
nn_budget=100, # Budget for appearance features
|
||||
gating_only_position=False # Use both position and appearance for gating
|
||||
)
|
||||
self._initialized = True
|
||||
self.track_id_counter = {} # Track seen IDs to detect duplicates
|
||||
|
||||
def update(self, detections, frame=None):
|
||||
# detections: list of dicts with keys ['bbox', 'confidence', 'class_id', ...]
|
||||
# frame: BGR image (optional, for appearance embedding)
|
||||
# Returns: list of dicts with keys ['id', 'bbox', 'confidence', 'class_id', ...]
|
||||
|
||||
# Convert detections to DeepSORT format with validation
|
||||
ds_detections = []
|
||||
for i, det in enumerate(detections):
|
||||
bbox = det.get('bbox')
|
||||
conf = det.get('confidence', 0.0)
|
||||
class_id = det.get('class_id', -1)
|
||||
|
||||
if bbox is not None and len(bbox) == 4:
|
||||
x1, y1, x2, y2 = bbox
|
||||
# Validate bbox dimensions
|
||||
if x2 > x1 and y2 > y1 and conf > 0.3: # Higher confidence threshold
|
||||
# Convert to [x1, y1, width, height] format expected by DeepSORT
|
||||
bbox_xywh = [x1, y1, x2-x1, y2-y1]
|
||||
ds_detections.append([bbox_xywh, conf, class_id])
|
||||
print(f"[DEEPSORT] Added detection {i}: bbox={bbox_xywh}, conf={conf:.2f}")
|
||||
else:
|
||||
print(f"[DEEPSORT] Rejected detection {i}: invalid bbox or low confidence")
|
||||
else:
|
||||
print(f"[DEEPSORT] Rejected detection {i}: invalid bbox format")
|
||||
|
||||
print(f"[DEEPSORT] Processing {len(ds_detections)} valid detections")
|
||||
|
||||
# Update tracker with frame for appearance features
|
||||
if frame is not None:
|
||||
tracks = self.tracker.update_tracks(ds_detections, frame=frame)
|
||||
else:
|
||||
tracks = self.tracker.update_tracks(ds_detections)
|
||||
|
||||
# Process results and check for duplicate IDs
|
||||
results = []
|
||||
current_ids = []
|
||||
|
||||
for track in tracks:
|
||||
if not track.is_confirmed():
|
||||
continue
|
||||
|
||||
track_id = track.track_id
|
||||
ltrb = track.to_ltrb()
|
||||
conf = track.det_conf if hasattr(track, 'det_conf') else 0.0
|
||||
class_id = track.det_class if hasattr(track, 'det_class') else -1
|
||||
|
||||
# Check for duplicate IDs
|
||||
if track_id in current_ids:
|
||||
print(f"[DEEPSORT ERROR] DUPLICATE ID DETECTED: {track_id}")
|
||||
continue # Skip this duplicate
|
||||
|
||||
current_ids.append(track_id)
|
||||
|
||||
# Convert back to [x1, y1, x2, y2] format
|
||||
x1, y1, x2, y2 = ltrb
|
||||
bbox_xyxy = [x1, y1, x2, y2]
|
||||
|
||||
results.append({
|
||||
'id': track_id,
|
||||
'bbox': bbox_xyxy,
|
||||
'confidence': conf,
|
||||
'class_id': class_id
|
||||
})
|
||||
|
||||
conf_str = f"{conf:.2f}" if conf is not None else "None"
|
||||
print(f"[DEEPSORT] Track ID={track_id}: bbox={bbox_xyxy}, conf={conf_str}")
|
||||
|
||||
# Update ID counter for statistics
|
||||
for track_id in current_ids:
|
||||
self.track_id_counter[track_id] = self.track_id_counter.get(track_id, 0) + 1
|
||||
|
||||
print(f"[DEEPSORT] Returning {len(results)} confirmed tracks")
|
||||
return results
|
||||
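
# --- Usage sketch (illustrative only) ----------------------------------------
# Minimal sketch of the singleton tracker above; the detection values are
# hypothetical placeholders. update() expects dicts with 'bbox' as
# [x1, y1, x2, y2] plus 'confidence' and 'class_id', and returns confirmed
# tracks only (detections below confidence 0.3 are rejected).
if __name__ == "__main__":
    import numpy as np

    tracker_a = DeepSortVehicleTracker()
    tracker_b = DeepSortVehicleTracker()
    assert tracker_a is tracker_b  # singleton: both names point to one instance

    detections = [
        {'bbox': [100, 120, 150, 200], 'confidence': 0.85, 'class_id': 2},
        {'bbox': [300, 140, 360, 230], 'confidence': 0.25, 'class_id': 2},  # filtered out
    ]
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy BGR frame

    for track in tracker_a.update(detections, frame=frame):
        print(f"id={track['id']} bbox={track['bbox']} conf={track['confidence']}")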
173
qt_app_pyside1/controllers/difference.py
Normal file
@@ -0,0 +1,173 @@
|
||||
# Detailed Comparison: video_controller_new.py vs video_controller_finale.py
|
||||
#
|
||||
# This document provides a function-by-function, block-by-block comparison between `video_controller_new.py` and `video_controller_finale.py` as of July 2025. It highlights what is present, missing, or different in each file, and explains the practical impact of those differences for real-world red light violation detection and video analytics.
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Table of Contents
|
||||
# - [Overall Structure](#overall-structure)
|
||||
# - [Class/Function Inventory](#classfunction-inventory)
|
||||
# - [Function-by-Function Comparison](#function-by-function-comparison)
|
||||
# - [__init__](#__init__)
|
||||
# - [set_source](#set_source)
|
||||
# - [_get_source_properties](#_get_source_properties)
|
||||
# - [start/stop](#startstop)
|
||||
# - [_run](#_run)
|
||||
# - [_process_frame](#_process_frame)
|
||||
# - [detect_red_light_violations](#detect_red_light_violations)
|
||||
# - [Key Differences and Impact](#key-differences-and-impact)
|
||||
# - [Summary Table](#summary-table)
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Overall Structure
|
||||
#
|
||||
# - **video_controller_new.py**
|
||||
# - Modernized, modular, and debug-heavy.
|
||||
# - Uses enhanced annotation utilities, more robust fallback logic, and detailed debug output.
|
||||
# - Violation detection logic is inlined and self-contained.
|
||||
# - State machine for per-vehicle violation tracking is explicit and debugged.
|
||||
# - Crosswalk/violation line detection is always run, with fallback.
|
||||
# - Always emits overlays and signals, even if no violators.
|
||||
#
|
||||
# - **video_controller_finale.py**
|
||||
# - Reference implementation, known to work reliably in production.
|
||||
# - May use some different utility imports and slightly different state handling.
|
||||
# - Violation detection logic may be more tightly coupled to tracker or external detector class.
|
||||
# - Debug output is present but may be less granular.
|
||||
# - Fallbacks for violation line and traffic light are robust.
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Class/Function Inventory
|
||||
#
|
||||
# | Function/Class | In New | In Finale | Notes |
|
||||
# |-------------------------------|--------|-----------|-------|
|
||||
# | VideoController | ✔ | ✔ | Main class in both |
|
||||
# | __init__ | ✔ | ✔ | New: more debug, explicit tracker, fallback logic |
|
||||
# | set_source | ✔ | ✔ | Similar, new has more robust type handling |
|
||||
# | _get_source_properties | ✔ | ✔ | Similar, new has more debug |
|
||||
# | start/stop | ✔ | ✔ | Similar, new has more debug |
|
||||
# | _run | ✔ | ✔ | New: more debug, more robust detection/tracking |
|
||||
# | _process_frame | ✔ | ✔ | New: always runs crosswalk, overlays, fallback |
|
||||
# | detect_red_light_violations | ✔ | ✔ | New: inlined, explicit state machine, more debug |
|
||||
# | violation_detector (external) | ✖ | ✔ | Finale may use RedLightViolationDetector class |
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Function-by-Function Comparison
|
||||
#
|
||||
# ### __init__
|
||||
# - **New:**
|
||||
# - Sets up all state, tracker, and debug counters.
|
||||
# - Imports and initializes crosswalk detection utilities with try/except.
|
||||
# - Does NOT use external `RedLightViolationDetector` (commented out).
|
||||
# - Uses inlined `detect_red_light_violations` method.
|
||||
# - **Finale:**
|
||||
# - May use external `RedLightViolationDetector` class for violation logic.
|
||||
# - Similar state setup, but possibly less debug output.
|
||||
#
|
||||
# ### set_source
|
||||
# - **New:**
|
||||
# - Handles all source types robustly (file, camera, URL, device).
|
||||
# - More debug output for every branch.
|
||||
# - **Finale:**
|
||||
# - Similar logic, possibly less robust for edge cases.
|
||||
#
|
||||
# ### _get_source_properties
|
||||
# - **New:**
|
||||
# - More debug output, retries for camera sources.
|
||||
# - **Finale:**
|
||||
# - Similar, but may not retry as aggressively.
|
||||
#
|
||||
# ### start/stop
|
||||
# - **New:**
|
||||
# - More debug output, aggressive render timer (10ms).
|
||||
# - **Finale:**
|
||||
# - Standard start/stop, less debug.
|
||||
#
|
||||
# ### _run
|
||||
# - **New:**
|
||||
# - Handles detection, tracking, and annotation in one loop.
|
||||
# - Always normalizes class names.
|
||||
# - Always draws overlays and emits signals.
|
||||
# - More debug output for every step.
|
||||
# - **Finale:**
|
||||
# - Similar, but may use external violation detector.
|
||||
# - May not always emit overlays if no detections.
|
||||
#
|
||||
# ### _process_frame
|
||||
# - **New:**
|
||||
# - Always runs crosswalk/violation line detection.
|
||||
# - Always overlays violation line and traffic light status.
|
||||
# - Only runs violation detection if both red light and violation line are present.
|
||||
# - Always emits overlays/signals, even if no violators.
|
||||
# - More robust fallback for violation line.
|
||||
# - More debug output for every step.
|
||||
# - **Finale:**
|
||||
# - Similar, but may skip overlays if no detections.
|
||||
# - May use external violation detector.
|
||||
#
|
||||
# ### detect_red_light_violations
|
||||
# - **New:**
|
||||
# - Inlined, explicit state machine for per-vehicle tracking.
|
||||
# - Requires vehicle to be behind the line before crossing during red.
|
||||
# - Cooldown logic to prevent duplicate violations.
|
||||
# - Extensive debug output for every vehicle, every frame.
|
||||
# - **Finale:**
|
||||
# - May use external class for violation logic.
|
||||
# - Similar state machine, but less debug output.
|
||||
# - May have slightly different fallback/cooldown logic.
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Key Differences and Impact
|
||||
#
|
||||
# - **External Violation Detector:**
|
||||
# - Finale uses `RedLightViolationDetector` class; New inlines the logic.
|
||||
# - Impact: New is easier to debug and modify, but harder to swap out logic.
|
||||
#
|
||||
# - **Debug Output:**
|
||||
# - New has much more granular debug output for every step and every vehicle.
|
||||
# - Impact: Easier to diagnose issues in New.
|
||||
#
|
||||
# - **Fallback Logic:**
|
||||
# - Both have robust fallback for violation line and traffic light, but New is more explicit.
|
||||
#
|
||||
# - **Overlay/Signal Emission:**
|
||||
# - New always emits overlays and signals, even if no violators.
|
||||
# - Finale may skip if no detections.
|
||||
#
|
||||
# - **State Machine:**
|
||||
# - New's state machine is explicit, per-vehicle, and debugged.
|
||||
# - Finale's may be more implicit or handled in external class.
|
||||
#
|
||||
# - **Modularity:**
|
||||
# - Finale is more modular (external detector class), New is more monolithic but easier to trace.
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Summary Table
|
||||
#
|
||||
# | Feature/Function | video_controller_new.py | video_controller_finale.py |
|
||||
# |---------------------------------|:----------------------:|:-------------------------:|
|
||||
# | External Violation Detector | ✖ | ✔ |
|
||||
# | Inlined Violation Logic | ✔ | ✖ |
|
||||
# | Robust Fallbacks | ✔ | ✔ |
|
||||
# | Always Emits Overlays/Signals | ✔ | ✖/Partial |
|
||||
# | Extensive Debug Output | ✔ | ✖/Partial |
|
||||
# | Per-Vehicle State Machine | ✔ | ✔ |
|
||||
# | Modularity | ✖ | ✔ |
|
||||
# | Easy to Debug/Modify | ✔ | ✖/Partial |
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# ## Conclusion
|
||||
#
|
||||
# - Use `video_controller_new.py` for maximum debug visibility, easier modification, and robust fallback logic.
|
||||
# - Use `video_controller_finale.py` for production-proven modularity and if you want to swap out violation logic easily.
|
||||
# - Both are robust, but the new version is more transparent and easier to debug in real-world scenarios.
|
||||
#
|
||||
# ---
|
||||
#
|
||||
# *This file is auto-generated for developer reference. Update as code evolves.*
|
||||
394
qt_app_pyside1/controllers/embedder_import_patch.py
Normal file
@@ -0,0 +1,394 @@
|
||||
from deep_sort_realtime.embedder.embedder_pytorch import MobileNetV2_Embedder
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
|
||||
# Add parent directory to path for imports
|
||||
current_dir = Path(__file__).parent.parent.parent
|
||||
sys.path.append(str(current_dir))
|
||||
|
||||
# Import OpenVINO modules
|
||||
from detection_openvino import OpenVINOVehicleDetector
|
||||
from red_light_violation_pipeline import RedLightViolationPipeline
|
||||
|
||||
# Import from our utils package
|
||||
from utils.helpers import bbox_iou
|
||||
|
||||
class ModelManager:
|
||||
"""
|
||||
Manages OpenVINO models for traffic detection and violation monitoring.
|
||||
Only uses RedLightViolationPipeline for all violation/crosswalk/traffic light logic.
|
||||
"""
|
||||
def __init__(self, config_file: str = None):
|
||||
"""
|
||||
Initialize model manager with configuration.
|
||||
|
||||
Args:
|
||||
config_file: Path to JSON configuration file
|
||||
"""
|
||||
self.config = self._load_config(config_file)
|
||||
self.detector = None
|
||||
self.violation_pipeline = None # Use RedLightViolationPipeline only
|
||||
self.tracker = None
|
||||
self._initialize_models()
|
||||
|
||||
def _load_config(self, config_file: Optional[str]) -> Dict:
|
||||
"""
|
||||
Load configuration from file or use defaults.
|
||||
|
||||
Args:
|
||||
config_file: Path to JSON configuration file
|
||||
|
||||
Returns:
|
||||
Configuration dictionary
|
||||
"""
|
||||
import json
|
||||
default_config = {
|
||||
"detection": {
|
||||
"confidence_threshold": 0.5,
|
||||
"enable_ocr": True,
|
||||
"enable_tracking": True,
|
||||
"model_path": None
|
||||
},
|
||||
"violations": {
|
||||
"red_light_grace_period": 2.0,
|
||||
"stop_sign_duration": 2.0,
|
||||
"speed_tolerance": 5
|
||||
},
|
||||
"display": {
|
||||
"max_display_width": 800,
|
||||
"show_confidence": True,
|
||||
"show_labels": True,
|
||||
"show_license_plates": True
|
||||
},
|
||||
"performance": {
|
||||
"max_history_frames": 1000,
|
||||
"cleanup_interval": 3600
|
||||
}
|
||||
}
|
||||
|
||||
if config_file and os.path.exists(config_file):
|
||||
try:
|
||||
with open(config_file, 'r') as f:
|
||||
loaded_config = json.load(f)
|
||||
# Merge with defaults (preserving loaded values)
|
||||
for section in default_config:
|
||||
if section in loaded_config:
|
||||
default_config[section].update(loaded_config[section])
|
||||
except Exception as e:
|
||||
print(f"Error loading config: {e}")
|
||||
|
||||
return default_config
|
||||
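    # Merge semantics (illustrative): a partial config.json such as
    #     {"detection": {"confidence_threshold": 0.4, "device": "GPU"}}
    # overrides only the keys it names inside known sections; all other
    # defaults above are kept, and extra keys such as "device" are added to
    # the section via dict.update(). Top-level sections that do not exist in
    # the defaults are ignored by the loop above.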
|
||||
def _initialize_models(self):
|
||||
"""Initialize OpenVINO detection and violation models."""
|
||||
try:
|
||||
# Find best model path
|
||||
model_path = self.config["detection"].get("model_path")
|
||||
if not model_path or not os.path.exists(model_path):
|
||||
model_path = self._find_best_model_path()
|
||||
if not model_path:
|
||||
print("❌ No model found")
|
||||
return
|
||||
|
||||
# Initialize detector
|
||||
print(f"✅ Initializing OpenVINO detector with model: {model_path}")
|
||||
device = self.config["detection"].get("device", "AUTO")
|
||||
print(f"✅ Using inference device: {device}")
|
||||
self.detector = OpenVINOVehicleDetector(
|
||||
model_path=model_path,
|
||||
device=device,
|
||||
confidence_threshold=self.config["detection"]["confidence_threshold"]
|
||||
)
|
||||
|
||||
# Use only RedLightViolationPipeline for violation/crosswalk/traffic light logic
|
||||
self.violation_pipeline = RedLightViolationPipeline(debug=True)
|
||||
print("✅ Red light violation pipeline initialized (all other violation logic removed)")
|
||||
|
||||
# Initialize tracker if enabled
|
||||
if self.config["detection"]["enable_tracking"]:
|
||||
try:
|
||||
from deep_sort_realtime.deepsort_tracker import DeepSort
|
||||
|
||||
# Use optimized OpenVINO embedder if available
|
||||
use_optimized_embedder = True
|
||||
embedder = None
|
||||
|
||||
if use_optimized_embedder:
|
||||
try:
|
||||
# Try importing our custom OpenVINO embedder
|
||||
from utils.embedder_openvino import OpenVINOEmbedder
|
||||
print(f"✅ Initializing optimized OpenVINO embedder on {device}")
|
||||
|
||||
# Set model_path explicitly to use the user-supplied model
|
||||
script_dir = Path(__file__).parent.parent
|
||||
model_file_path = None
|
||||
|
||||
# Try the copy version first (might be modified for compatibility)
|
||||
copy_model_path = script_dir / "mobilenetv2 copy.xml"
|
||||
original_model_path = script_dir / "mobilenetv2.xml"
|
||||
|
||||
if copy_model_path.exists():
|
||||
model_file_path = str(copy_model_path)
|
||||
print(f"✅ Using user-supplied model: {model_file_path}")
|
||||
elif original_model_path.exists():
|
||||
model_file_path = str(original_model_path)
|
||||
print(f"✅ Using user-supplied model: {model_file_path}")
|
||||
|
||||
embedder = OpenVINOEmbedder(
|
||||
model_path=model_file_path,
|
||||
device=device,
|
||||
half=True # Use FP16 for better performance
|
||||
)
|
||||
except Exception as emb_err:
|
||||
print(f"⚠️ OpenVINO embedder failed: {emb_err}, falling back to default")
|
||||
|
||||
# Initialize tracker with embedder based on available parameters
|
||||
if embedder is None:
|
||||
print("⚠️ No embedder available, using DeepSORT with default tracking")
|
||||
else:
|
||||
print("✅ Initializing DeepSORT with custom embedder")
|
||||
|
||||
# Simple initialization without problematic parameters
|
||||
self.tracker = DeepSort(
|
||||
max_age=30,
|
||||
n_init=3,
|
||||
nn_budget=100,
|
||||
embedder=embedder
|
||||
)
|
||||
print("✅ DeepSORT tracker initialized")
|
||||
except ImportError:
|
||||
print("⚠️ DeepSORT not available")
|
||||
self.tracker = None
|
||||
print("✅ Models initialized successfully")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error initializing models: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
def _find_best_model_path(self, base_model_name: str = None) -> Optional[str]:
|
||||
"""
|
||||
Find best available model file in workspace.
|
||||
|
||||
Args:
|
||||
base_model_name: Base model name without extension
|
||||
|
||||
Returns:
|
||||
Path to model file or None
|
||||
"""
|
||||
# Select model based on device if base_model_name is not specified
|
||||
if base_model_name is None:
|
||||
device = self.config["detection"].get("device", "AUTO")
|
||||
if device == "CPU" or device == "AUTO":
|
||||
# Use yolo11n for CPU - faster, lighter model
|
||||
base_model_name = "yolo11n"
|
||||
print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)")
|
||||
else:
|
||||
# Use yolo11x for GPU - larger model with better accuracy
|
||||
base_model_name = "yolo11x"
|
||||
print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)")
|
||||
|
||||
# Check if the openvino_models directory exists in the current working directory
|
||||
cwd_openvino_dir = Path.cwd() / "openvino_models"
|
||||
if cwd_openvino_dir.exists():
|
||||
direct_path = cwd_openvino_dir / f"{base_model_name}.xml"
|
||||
if direct_path.exists():
|
||||
print(f"✅ Found model directly in CWD: {direct_path}")
|
||||
return str(direct_path.absolute())
|
||||
|
||||
# Check for absolute path to openvino_models (this is the most reliable)
|
||||
absolute_openvino_dir = Path("D:/Downloads/finale6/khatam/openvino_models")
|
||||
if absolute_openvino_dir.exists():
|
||||
direct_path = absolute_openvino_dir / f"{base_model_name}.xml"
|
||||
if direct_path.exists():
|
||||
print(f"✅ Found model at absolute path: {direct_path}")
|
||||
return str(direct_path.absolute())
|
||||
|
||||
# Try relative to the model_manager.py file
|
||||
openvino_models_dir = Path(__file__).parent.parent.parent / "openvino_models"
|
||||
direct_path = openvino_models_dir / f"{base_model_name}.xml"
|
||||
if direct_path.exists():
|
||||
print(f"✅ Found model in app directory: {direct_path}")
|
||||
return str(direct_path.absolute())
|
||||
|
||||
# Check for model in folder structure within openvino_models
|
||||
subfolder_path = openvino_models_dir / f"{base_model_name}_openvino_model" / f"{base_model_name}.xml"
|
||||
if subfolder_path.exists():
|
||||
print(f"✅ Found model in subfolder: {subfolder_path}")
|
||||
return str(subfolder_path.absolute())
|
||||
|
||||
# Try other common locations
|
||||
search_dirs = [
|
||||
".",
|
||||
"..",
|
||||
"../models",
|
||||
"../rcb",
|
||||
"../openvino_models",
|
||||
f"../{base_model_name}_openvino_model",
|
||||
"../..", # Go up to project root
|
||||
"../../openvino_models", # Project root / openvino_models
|
||||
]
|
||||
|
||||
model_extensions = [
|
||||
(f"{base_model_name}.xml", "OpenVINO IR direct"),
|
||||
(f"{base_model_name}_openvino_model/{base_model_name}.xml", "OpenVINO IR"),
|
||||
(f"{base_model_name}.pt", "PyTorch"),
|
||||
]
|
||||
|
||||
for search_dir in search_dirs:
|
||||
search_path = Path(__file__).parent.parent / search_dir
|
||||
if not search_path.exists():
|
||||
continue
|
||||
|
||||
for model_file, model_type in model_extensions:
|
||||
model_path = search_path / model_file
|
||||
if model_path.exists():
|
||||
print(f"✅ Found {model_type} model: {model_path}")
|
||||
return str(model_path.absolute())
|
||||
|
||||
print(f"❌ No model found for {base_model_name}")
|
||||
return None
|
||||
|
||||
def detect(self, frame: np.ndarray) -> List[Dict]:
|
||||
"""
|
||||
Detect objects in frame.
|
||||
|
||||
Args:
|
||||
frame: Input video frame
|
||||
|
||||
Returns:
|
||||
List of detection dictionaries
|
||||
"""
|
||||
if self.detector is None:
|
||||
print("WARNING: No detector available")
|
||||
return []
|
||||
try:
|
||||
            # Enforce a floor of 0.3 on the confidence threshold (max() never lowers the configured value)
|
||||
conf_threshold = max(0.3, self.config["detection"].get("confidence_threshold", 0.5))
|
||||
detections = self.detector.detect_vehicles(frame, conf_threshold=conf_threshold)
|
||||
|
||||
# Add debug output
|
||||
if detections:
|
||||
print(f"DEBUG: Detected {len(detections)} objects: " +
|
||||
", ".join([f"{d['class_name']} ({d['confidence']:.2f})" for d in detections[:3]]))
|
||||
|
||||
# Print bounding box coordinates of first detection
|
||||
if len(detections) > 0:
|
||||
print(f"DEBUG: First detection bbox: {detections[0]['bbox']}")
|
||||
else:
|
||||
print("DEBUG: No detections in this frame")
|
||||
|
||||
return detections
|
||||
except Exception as e:
|
||||
print(f"❌ Detection error: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return []
|
||||
|
||||
def update_tracking(self, detections: List[Dict], frame: np.ndarray) -> List[Dict]:
|
||||
"""
|
||||
Update tracking information for detections.
|
||||
|
||||
Args:
|
||||
detections: List of detections
|
||||
frame: Current video frame
|
||||
|
||||
Returns:
|
||||
Updated list of detections with tracking info
|
||||
"""
|
||||
if not self.tracker or not detections:
|
||||
return detections
|
||||
|
||||
try:
|
||||
# Format detections for DeepSORT
|
||||
tracker_dets = []
|
||||
for det in detections:
|
||||
if 'bbox' not in det:
|
||||
continue
|
||||
|
||||
bbox = det['bbox']
|
||||
if len(bbox) < 4:
|
||||
continue
|
||||
|
||||
x1, y1, x2, y2 = bbox
|
||||
w = x2 - x1
|
||||
h = y2 - y1
|
||||
|
||||
if w <= 0 or h <= 0:
|
||||
continue
|
||||
|
||||
conf = det.get('confidence', 0.0)
|
||||
class_name = det.get('class_name', 'unknown')
|
||||
tracker_dets.append(([x1, y1, w, h], conf, class_name))
|
||||
|
||||
# Update tracks
|
||||
if tracker_dets:
|
||||
tracks = self.tracker.update_tracks(tracker_dets, frame=frame)
|
||||
|
||||
# Associate tracks with detections
|
||||
for track in tracks:
|
||||
if not track.is_confirmed():
|
||||
continue
|
||||
|
||||
track_id = track.track_id
|
||||
ltrb = track.to_ltrb()
|
||||
|
||||
for det in detections:
|
||||
if 'bbox' not in det:
|
||||
continue
|
||||
|
||||
bbox = det['bbox']
|
||||
if len(bbox) < 4:
|
||||
continue
|
||||
|
||||
dx1, dy1, dx2, dy2 = bbox
|
||||
iou = bbox_iou((dx1, dy1, dx2, dy2), tuple(map(int, ltrb)))
|
||||
|
||||
if iou > 0.5:
|
||||
det['track_id'] = track_id
|
||||
break
|
||||
return detections
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Tracking error: {e}")
|
||||
return detections
|
||||
|
||||
def update_config(self, new_config: Dict):
|
||||
"""
|
||||
Update configuration parameters.
|
||||
|
||||
Args:
|
||||
new_config: New configuration dictionary
|
||||
"""
|
||||
if not new_config:
|
||||
return
|
||||
|
||||
# Store old device setting to check if it changed
|
||||
old_device = self.config["detection"].get("device", "AUTO") if "detection" in self.config else "AUTO"
|
||||
|
||||
# Update configuration
|
||||
for section in new_config:
|
||||
if section in self.config:
|
||||
self.config[section].update(new_config[section])
|
||||
else:
|
||||
self.config[section] = new_config[section]
|
||||
|
||||
# Check if device changed - if so, we need to reinitialize models
|
||||
new_device = self.config["detection"].get("device", "AUTO")
|
||||
device_changed = old_device != new_device
|
||||
|
||||
if device_changed:
|
||||
print(f"📢 Device changed from {old_device} to {new_device}, reinitializing models...")
|
||||
# Reinitialize models with new device
|
||||
self._initialize_models()
|
||||
return
|
||||
|
||||
# Just update detector confidence threshold if device didn't change
|
||||
if self.detector:
|
||||
conf_thres = self.config["detection"].get("confidence_threshold", 0.5)
|
||||
self.detector.conf_thres = conf_thres
|
||||
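
# --- Reference sketch (illustrative only) -------------------------------------
# utils.helpers.bbox_iou is not shown in this snapshot; update_tracking()
# above assumes a standard intersection-over-union on [x1, y1, x2, y2] boxes,
# roughly as sketched below (renamed to avoid shadowing the real helper).
def _example_bbox_iou(box_a, box_b):
    """Return IoU of two axis-aligned boxes given as (x1, y1, x2, y2)."""
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b

    # Intersection rectangle (empty intersections clamp to zero area)
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax2, bx2), min(ay2, by2)
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)

    area_a = max(0.0, ax2 - ax1) * max(0.0, ay2 - ay1)
    area_b = max(0.0, bx2 - bx1) * max(0.0, by2 - by1)
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0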
686
qt_app_pyside1/controllers/enhanced_video_controller.py
Normal file
@@ -0,0 +1,686 @@
|
||||
"""
|
||||
Enhanced video controller with async inference and separated FPS tracking
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
from collections import deque
|
||||
from typing import Dict, List, Optional, Tuple, Any
|
||||
from pathlib import Path
|
||||
from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer
|
||||
from PySide6.QtGui import QImage, QPixmap
|
||||
|
||||
# Add parent directory to path for imports
|
||||
current_dir = Path(__file__).parent.parent.parent
|
||||
sys.path.append(str(current_dir))
|
||||
|
||||
# Import our async detector
|
||||
try:
|
||||
# Try direct import first
|
||||
from detection_openvino_async import OpenVINOVehicleDetector
|
||||
except ImportError:
|
||||
# Fall back to import from project root
|
||||
sys.path.append(str(Path(__file__).parent.parent.parent))
|
||||
from detection_openvino_async import OpenVINOVehicleDetector
|
||||
|
||||
# Import traffic light color detection utility
|
||||
try:
|
||||
from utils.traffic_light_utils import detect_traffic_light_color, draw_traffic_light_status
|
||||
print("✅ Imported traffic light color detection utilities")
|
||||
except ImportError:
|
||||
# Create simple placeholder functions if imports fail
|
||||
def detect_traffic_light_color(frame, bbox):
|
||||
return {"color": "unknown", "confidence": 0.0}
|
||||
|
||||
def draw_traffic_light_status(frame, bbox, color):
|
||||
return frame
|
||||
print("⚠️ Failed to import traffic light color detection utilities")
|
||||
|
||||
# Import utilities for visualization
|
||||
try:
|
||||
# Try the direct import when running inside the qt_app_pyside directory
|
||||
from utils.enhanced_annotation_utils import (
|
||||
enhanced_draw_detections,
|
||||
draw_performance_overlay,
|
||||
enhanced_cv_to_qimage,
|
||||
enhanced_cv_to_pixmap
|
||||
)
|
||||
print("✅ Successfully imported enhanced_annotation_utils from utils package")
|
||||
except ImportError:
|
||||
try:
|
||||
# Try fully qualified import path
|
||||
from qt_app_pyside.utils.enhanced_annotation_utils import (
|
||||
enhanced_draw_detections,
|
||||
draw_performance_overlay,
|
||||
enhanced_cv_to_qimage,
|
||||
enhanced_cv_to_pixmap
|
||||
)
|
||||
print("✅ Successfully imported enhanced_annotation_utils from qt_app_pyside.utils package")
|
||||
except ImportError:
|
||||
# Fall back to our minimal implementation
|
||||
print("⚠️ Could not import enhanced_annotation_utils, using fallback implementation")
|
||||
sys.path.append(str(Path(__file__).parent.parent.parent))
|
||||
try:
|
||||
from fallback_annotation_utils import (
|
||||
enhanced_draw_detections,
|
||||
draw_performance_overlay,
|
||||
enhanced_cv_to_qimage,
|
||||
enhanced_cv_to_pixmap
|
||||
)
|
||||
print("✅ Using fallback_annotation_utils")
|
||||
except ImportError:
|
||||
print("❌ CRITICAL: Could not import annotation utilities! UI will be broken.")
|
||||
# Define minimal stub functions to prevent crashes
|
||||
def enhanced_draw_detections(frame, detections, **kwargs):
|
||||
return frame
|
||||
def draw_performance_overlay(frame, metrics):
|
||||
return frame
|
||||
def enhanced_cv_to_qimage(frame):
|
||||
return None
|
||||
def enhanced_cv_to_pixmap(frame):
|
||||
return None
|
||||
|
||||
class AsyncVideoProcessingThread(QThread):
|
||||
"""Thread for async video processing with separate detection and UI threads."""
|
||||
|
||||
# Signal for UI update with enhanced metadata
|
||||
frame_processed = Signal(np.ndarray, list, dict) # frame, detections, metrics
|
||||
|
||||
# Signal for separate processing metrics
|
||||
stats_updated = Signal(dict) # All performance metrics
|
||||
|
||||
def __init__(self, model_manager, parent=None):
|
||||
super().__init__(parent)
|
||||
self.model_manager = model_manager
|
||||
self.running = False
|
||||
self.paused = False
|
||||
|
||||
# Video source
|
||||
self.source = 0
|
||||
self.cap = None
|
||||
self.source_fps = 0
|
||||
self.target_fps = 30 # Target FPS for UI updates
|
||||
|
||||
# Performance tracking
|
||||
self.detection_fps = 0
|
||||
self.ui_fps = 0
|
||||
self.frame_count = 0
|
||||
self.start_time = 0
|
||||
self.detection_times = deque(maxlen=30) # Last 30 detection times
|
||||
self.ui_frame_times = deque(maxlen=30) # Last 30 UI frame times
|
||||
self.last_ui_frame_time = 0
|
||||
|
||||
# Mutexes for thread safety
|
||||
self.mutex = QMutex()
|
||||
self.wait_condition = QWaitCondition()
|
||||
|
||||
# FPS limiter to avoid CPU overload
|
||||
self.last_frame_time = 0
|
||||
self.min_frame_interval = 1.0 / 60 # Max 60 FPS
|
||||
|
||||
# Async processing queue with frame IDs
|
||||
self.frame_queue = [] # List of (frame_id, frame) tuples
|
||||
self.next_frame_id = 0
|
||||
self.processed_frames = {} # frame_id -> (frame, detections, metrics)
|
||||
self.last_emitted_frame_id = -1
|
||||
# Separate UI thread timer for smooth display
|
||||
self.ui_timer = QTimer()
|
||||
self.ui_timer.timeout.connect(self._emit_next_frame)
|
||||
|
||||
def set_source(self, source):
|
||||
"""Set video source - camera index or file path."""
|
||||
print(f"[AsyncThread] set_source: {source} ({type(source)})")
|
||||
if source is None:
|
||||
self.source = 0
|
||||
elif isinstance(source, str) and os.path.isfile(source):
|
||||
self.source = source
|
||||
elif isinstance(source, int):
|
||||
self.source = source
|
||||
else:
|
||||
print("[AsyncThread] Invalid source, defaulting to camera")
|
||||
self.source = 0
|
||||
|
||||
def start_processing(self):
|
||||
"""Start video processing."""
|
||||
self.running = True
|
||||
self.start()
|
||||
# Start UI timer for smooth frame emission
|
||||
self.ui_timer.start(int(1000 / self.target_fps))
|
||||
|
||||
def stop_processing(self):
|
||||
"""Stop video processing."""
|
||||
self.running = False
|
||||
self.wait_condition.wakeAll()
|
||||
self.wait()
|
||||
self.ui_timer.stop()
|
||||
if self.cap:
|
||||
self.cap.release()
|
||||
self.cap = None
|
||||
|
||||
def pause_processing(self):
|
||||
"""Pause video processing."""
|
||||
self.mutex.lock()
|
||||
self.paused = True
|
||||
self.mutex.unlock()
|
||||
|
||||
def resume_processing(self):
|
||||
"""Resume video processing."""
|
||||
self.mutex.lock()
|
||||
self.paused = False
|
||||
self.wait_condition.wakeAll()
|
||||
self.mutex.unlock()
|
||||
|
||||
def run(self):
|
||||
"""Main thread execution loop."""
|
||||
self._initialize_video()
|
||||
self.start_time = time.time()
|
||||
self.frame_count = 0
|
||||
|
||||
while self.running:
|
||||
# Check if paused
|
||||
self.mutex.lock()
|
||||
if self.paused:
|
||||
self.wait_condition.wait(self.mutex)
|
||||
self.mutex.unlock()
|
||||
|
||||
if not self.running:
|
||||
break
|
||||
|
||||
# Control frame rate
|
||||
current_time = time.time()
|
||||
time_diff = current_time - self.last_frame_time
|
||||
if time_diff < self.min_frame_interval:
|
||||
time.sleep(self.min_frame_interval - time_diff)
|
||||
|
||||
# Read frame
|
||||
ret, frame = self.cap.read()
|
||||
self.last_frame_time = time.time()
|
||||
|
||||
if not ret or frame is None:
|
||||
print("End of video or failed to read frame")
|
||||
# Check if we're using a file and should restart
|
||||
if isinstance(self.source, str) and os.path.isfile(self.source):
|
||||
self._initialize_video() # Restart video
|
||||
continue
|
||||
else:
|
||||
break
|
||||
|
||||
# Process frame asynchronously
|
||||
self._process_frame_async(frame)
|
||||
|
||||
# Update frame counter
|
||||
self.frame_count += 1
|
||||
|
||||
# Clean up when thread exits
|
||||
if self.cap:
|
||||
self.cap.release()
|
||||
self.cap = None
|
||||
|
||||
def _initialize_video(self):
|
||||
"""Initialize video source."""
|
||||
try:
|
||||
if self.cap:
|
||||
self.cap.release()
|
||||
|
||||
print(f"[EnhancedVideoController] _initialize_video: self.source = {self.source} (type: {type(self.source)})")
|
||||
# Only use camera if source is int or '0', else use file path
|
||||
if isinstance(self.source, int):
|
||||
self.cap = cv2.VideoCapture(self.source)
|
||||
elif isinstance(self.source, str) and os.path.isfile(self.source):
|
||||
self.cap = cv2.VideoCapture(self.source)
|
||||
else:
|
||||
print(f"[EnhancedVideoController] Invalid source: {self.source}, not opening VideoCapture.")
|
||||
return False
|
||||
|
||||
if not self.cap.isOpened():
|
||||
print(f"Failed to open video source: {self.source}")
|
||||
return False
|
||||
|
||||
# Get source FPS
|
||||
self.source_fps = self.cap.get(cv2.CAP_PROP_FPS)
|
||||
if self.source_fps <= 0:
|
||||
self.source_fps = 30 # Default fallback
|
||||
|
||||
print(f"Video source initialized: {self.source}, FPS: {self.source_fps}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error initializing video: {e}")
|
||||
return False
|
||||
|
||||
def _process_frame_async(self, frame):
|
||||
"""Process a frame with async detection."""
|
||||
try:
|
||||
# Start detection timer
|
||||
detection_start = time.time()
|
||||
|
||||
# Assign frame ID
|
||||
frame_id = self.next_frame_id
|
||||
self.next_frame_id += 1
|
||||
|
||||
# Get detector and start async inference
|
||||
detector = self.model_manager.detector
|
||||
|
||||
# Check if detector supports async API
|
||||
if hasattr(detector, 'detect_async_start'):
|
||||
# Use async API
|
||||
inf_frame_id = detector.detect_async_start(frame)
|
||||
|
||||
# Store frame in queue with the right ID
|
||||
self.mutex.lock()
|
||||
self.frame_queue.append((frame_id, frame, inf_frame_id))
|
||||
self.mutex.unlock()
|
||||
|
||||
# Try getting results from previous frames
|
||||
self._check_async_results()
|
||||
|
||||
else:
|
||||
# Fallback to synchronous API
|
||||
detections = self.model_manager.detect(frame)
|
||||
|
||||
# Calculate detection time
|
||||
detection_time = time.time() - detection_start
|
||||
self.detection_times.append(detection_time)
|
||||
|
||||
# Update detection FPS
|
||||
elapsed = time.time() - self.start_time
|
||||
if elapsed > 0:
|
||||
self.detection_fps = self.frame_count / elapsed
|
||||
|
||||
# Calculate detection metrics
|
||||
detection_ms = detection_time * 1000
|
||||
avg_detection_ms = np.mean(self.detection_times) * 1000
|
||||
|
||||
# Store metrics
|
||||
metrics = {
|
||||
'detection_fps': self.detection_fps,
|
||||
'detection_ms': detection_ms,
|
||||
'avg_detection_ms': avg_detection_ms,
|
||||
'frame_id': frame_id
|
||||
}
|
||||
|
||||
# Store processed frame
|
||||
self.mutex.lock()
|
||||
self.processed_frames[frame_id] = (frame, detections, metrics)
|
||||
self.mutex.unlock()
|
||||
|
||||
# Emit stats update
|
||||
self.stats_updated.emit(metrics)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error in frame processing: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
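    # Assumed async detector API (detection_openvino_async is not shown here):
    #   detect_async_start(frame) -> request_id
    #   detect_async_get_result(request_id, wait=False) -> detections or None
    #   active_requests: dict of request_id -> (..., ..., start_time)
    # _check_async_results() below polls these so the capture loop never blocks
    # on inference.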
def _check_async_results(self):
|
||||
"""Check for completed async inference requests."""
|
||||
try:
|
||||
detector = self.model_manager.detector
|
||||
if not hasattr(detector, 'detect_async_get_result'):
|
||||
return
|
||||
|
||||
# Get any frames waiting for results
|
||||
self.mutex.lock()
|
||||
queue_copy = self.frame_queue.copy()
|
||||
self.mutex.unlock()
|
||||
|
||||
processed_frames = []
|
||||
|
||||
# Check each frame in the queue
|
||||
for idx, (frame_id, frame, inf_frame_id) in enumerate(queue_copy):
|
||||
# Try to get results without waiting
|
||||
detections = detector.detect_async_get_result(inf_frame_id, wait=False)
|
||||
|
||||
# If results are ready
|
||||
if detections is not None:
|
||||
# Calculate metrics
|
||||
detection_time = time.time() - detector.active_requests[inf_frame_id][2] if inf_frame_id in detector.active_requests else 0
|
||||
self.detection_times.append(detection_time)
|
||||
|
||||
# Update detection FPS
|
||||
elapsed = time.time() - self.start_time
|
||||
if elapsed > 0:
|
||||
self.detection_fps = self.frame_count / elapsed
|
||||
|
||||
# Calculate metrics
|
||||
detection_ms = detection_time * 1000
|
||||
avg_detection_ms = np.mean(self.detection_times) * 1000
|
||||
|
||||
# Store metrics
|
||||
metrics = {
|
||||
'detection_fps': self.detection_fps,
|
||||
'detection_ms': detection_ms,
|
||||
'avg_detection_ms': avg_detection_ms,
|
||||
'frame_id': frame_id
|
||||
}
|
||||
|
||||
# Store processed frame
|
||||
self.mutex.lock()
|
||||
self.processed_frames[frame_id] = (frame, detections, metrics)
|
||||
processed_frames.append(frame_id)
|
||||
self.mutex.unlock()
|
||||
|
||||
# Emit stats update
|
||||
self.stats_updated.emit(metrics)
|
||||
|
||||
# Remove processed frames from queue
|
||||
if processed_frames:
|
||||
self.mutex.lock()
|
||||
self.frame_queue = [item for item in self.frame_queue
|
||||
if item[0] not in processed_frames]
|
||||
self.mutex.unlock()
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error checking async results: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
def _emit_next_frame(self):
|
||||
"""Emit the next processed frame to UI at a controlled rate."""
|
||||
try:
|
||||
# Update UI FPS calculation
|
||||
current_time = time.time()
|
||||
if self.last_ui_frame_time > 0:
|
||||
ui_frame_time = current_time - self.last_ui_frame_time
|
||||
self.ui_frame_times.append(ui_frame_time)
|
||||
self.ui_fps = 1.0 / ui_frame_time if ui_frame_time > 0 else 0
|
||||
self.last_ui_frame_time = current_time
|
||||
|
||||
# Check async results first
|
||||
self._check_async_results()
|
||||
|
||||
# Find the next frame to emit
|
||||
self.mutex.lock()
|
||||
available_frames = sorted(self.processed_frames.keys())
|
||||
self.mutex.unlock()
|
||||
|
||||
if not available_frames:
|
||||
return
|
||||
|
||||
next_frame_id = available_frames[0]
|
||||
|
||||
# Get the frame data
|
||||
self.mutex.lock()
|
||||
frame, detections, metrics = self.processed_frames.pop(next_frame_id)
|
||||
self.mutex.unlock()
|
||||
|
||||
# Add UI FPS to metrics
|
||||
metrics['ui_fps'] = self.ui_fps
|
||||
|
||||
# Apply tracking if available
|
||||
if self.model_manager.tracker:
|
||||
detections = self.model_manager.update_tracking(detections, frame)
|
||||
|
||||
# Emit the frame to the UI
|
||||
self.frame_processed.emit(frame, detections, metrics)
|
||||
|
||||
# Store as last emitted frame
|
||||
self.last_emitted_frame_id = next_frame_id
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error emitting frame: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
class EnhancedVideoController(QObject):
|
||||
"""
|
||||
Enhanced video controller with better file handling and statistics.
|
||||
"""
|
||||
# Define signals
|
||||
    frame_ready = Signal(QPixmap, list, dict)  # Annotated frame pixmap, detections, metrics (matches the emits below)
|
||||
frame_np_ready = Signal(np.ndarray) # Frame as NumPy array
|
||||
    raw_frame_ready = Signal(np.ndarray, list, dict)  # Raw frame, detections, metrics (matches the emits below)
|
||||
stats_ready = Signal(dict) # All performance stats (dictionary with fps and detection_time)
|
||||
|
||||
# Add instance variable to track the most recent traffic light color
|
||||
def __init__(self, model_manager=None):
|
||||
"""Initialize the video controller"""
|
||||
super().__init__()
|
||||
|
||||
# Input source
|
||||
self._source = 0 # Default to camera 0
|
||||
self._source_type = "camera"
|
||||
self._running = False
|
||||
self._last_traffic_light_color = "unknown"
|
||||
|
||||
# Regular Controller instance variables
|
||||
self.model_manager = model_manager
|
||||
self.processing_thread = None
|
||||
self.show_annotations = True
|
||||
self.show_fps = True
|
||||
self.save_video = False
|
||||
self.video_writer = None
|
||||
|
||||
def set_source(self, source):
|
||||
"""Set video source - camera index or file path."""
|
||||
print(f"[EnhancedVideoController] set_source: {source} ({type(source)})")
|
||||
if self.processing_thread:
|
||||
self.processing_thread.set_source(source)
|
||||
|
||||
def start(self):
|
||||
"""Start video processing."""
|
||||
if self.processing_thread and self.processing_thread.running:
|
||||
return
|
||||
|
||||
# Create new processing thread
|
||||
self.processing_thread = AsyncVideoProcessingThread(self.model_manager)
|
||||
|
||||
# Connect signals
|
||||
self.processing_thread.frame_processed.connect(self._on_frame_processed)
|
||||
self.processing_thread.stats_updated.connect(self._on_stats_updated)
|
||||
|
||||
# Start processing
|
||||
self.processing_thread.start_processing()
|
||||
|
||||
def stop(self):
|
||||
"""Stop video processing."""
|
||||
if self.processing_thread:
|
||||
self.processing_thread.stop_processing()
|
||||
self.processing_thread = None
|
||||
|
||||
if self.video_writer:
|
||||
self.video_writer.release()
|
||||
self.video_writer = None
|
||||
|
||||
def pause(self):
|
||||
"""Pause video processing."""
|
||||
if self.processing_thread:
|
||||
self.processing_thread.pause_processing()
|
||||
|
||||
def resume(self):
|
||||
"""Resume video processing."""
|
||||
if self.processing_thread:
|
||||
self.processing_thread.resume_processing()
|
||||
|
||||
def toggle_annotations(self, enabled):
|
||||
"""Toggle annotations on/off."""
|
||||
self.show_annotations = enabled
|
||||
|
||||
def toggle_fps_display(self, enabled):
|
||||
"""Toggle FPS display on/off."""
|
||||
self.show_fps = enabled
|
||||
|
||||
def start_recording(self, output_path, frame_size=(640, 480), fps=30):
|
||||
"""Start recording video to file."""
|
||||
self.save_video = True
|
||||
fourcc = cv2.VideoWriter_fourcc(*'XVID')
|
||||
self.video_writer = cv2.VideoWriter(
|
||||
output_path, fourcc, fps,
|
||||
(frame_size[0], frame_size[1])
|
||||
)
|
||||
|
||||
def stop_recording(self):
|
||||
"""Stop recording video."""
|
||||
self.save_video = False
|
||||
if self.video_writer:
|
||||
self.video_writer.release()
|
||||
self.video_writer = None
|
||||
|
||||
def _on_frame_processed(self, frame, detections, metrics):
|
||||
"""Handle processed frame from the worker thread."""
|
||||
try:
|
||||
# Create a copy of the frame for annotation
|
||||
display_frame = frame.copy()
|
||||
|
||||
# Apply annotations if enabled
|
||||
if self.show_annotations and detections:
|
||||
                display_frame = enhanced_draw_detections(display_frame, detections)

                # Detect and annotate traffic light colors
|
||||
for detection in detections:
|
||||
# Check for both class_id 9 (COCO) and any other traffic light classes
|
||||
if detection.get('class_id') == 9 or detection.get('class_name') == 'traffic light':
|
||||
bbox = detection.get('bbox')
|
||||
if not bbox:
|
||||
continue
|
||||
|
||||
# Get traffic light color
|
||||
color = detect_traffic_light_color(frame, bbox)
|
||||
# Store the latest traffic light color
|
||||
self._last_traffic_light_color = color
|
||||
# Draw traffic light status
|
||||
display_frame = draw_traffic_light_status(display_frame, bbox, color)
|
||||
print(f"🚦 Traffic light detected with color: {color}")
|
||||
|
||||
# Add FPS counter if enabled
|
||||
if self.show_fps:
|
||||
# Add both detection and UI FPS
|
||||
detection_fps = metrics.get('detection_fps', 0)
|
||||
ui_fps = metrics.get('ui_fps', 0)
|
||||
detection_ms = metrics.get('avg_detection_ms', 0)
|
||||
|
||||
display_frame = draw_performance_overlay(
|
||||
display_frame,
|
||||
{
|
||||
"Detection FPS": f"{detection_fps:.1f}",
|
||||
"UI FPS": f"{ui_fps:.1f}",
|
||||
"Inference": f"{detection_ms:.1f} ms"
|
||||
}
|
||||
)
|
||||
|
||||
# Save frame if recording
|
||||
if self.save_video and self.video_writer:
|
||||
self.video_writer.write(display_frame)
|
||||
|
||||
# Convert to QPixmap for display
|
||||
pixmap = enhanced_cv_to_pixmap(display_frame)
|
||||
|
||||
# Emit signals
|
||||
self.frame_ready.emit(pixmap, detections, metrics)
|
||||
self.raw_frame_ready.emit(frame, detections, metrics)
|
||||
# Emit numpy frame for compatibility with existing connections
|
||||
self.frame_np_ready.emit(frame)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error processing frame: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
def _on_stats_updated(self, stats):
|
||||
"""Handle updated statistics from the worker thread."""
|
||||
try:
|
||||
# Create a proper stats dictionary for the LiveTab
|
||||
ui_stats = {
|
||||
'fps': stats.get('detection_fps', 0.0),
|
||||
'detection_time': stats.get('avg_detection_ms', 0.0),
|
||||
'traffic_light_color': self._last_traffic_light_color
|
||||
}
|
||||
print(f"Emitting stats: {ui_stats}")
|
||||
# Emit as a dictionary - fixed signal/slot mismatch
|
||||
self.stats_ready.emit(ui_stats)
|
||||
except Exception as e:
|
||||
print(f"Error in stats update: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
def _process_frame_for_display(self, frame, detections, metrics=None):
|
||||
"""Process a frame for display, adding annotations."""
|
||||
try:
|
||||
# Create a copy for display
|
||||
display_frame = frame.copy()
|
||||
# Process traffic light detections to identify colors
|
||||
for det in detections:
|
||||
if det.get('class_name') == 'traffic light':
|
||||
# Get traffic light color
|
||||
bbox = det['bbox']
|
||||
light_color = detect_traffic_light_color(frame, bbox)
|
||||
|
||||
# Add color information to detection
|
||||
det['traffic_light_color'] = light_color
|
||||
|
||||
# Store the latest traffic light color
|
||||
self._last_traffic_light_color = light_color
|
||||
|
||||
# Use specialized drawing for traffic lights
|
||||
display_frame = draw_traffic_light_status(display_frame, bbox, light_color)
|
||||
|
||||
print(f"🚦 Traffic light detected with color: {light_color}")
|
||||
else:
|
||||
# Draw regular detection box
|
||||
bbox = det['bbox']
|
||||
x1, y1, x2, y2 = [int(c) for c in bbox]
|
||||
class_name = det.get('class_name', 'object')
|
||||
confidence = det.get('confidence', 0.0)
|
||||
|
||||
label = f"{class_name} {confidence:.2f}"
|
||||
color = (0, 255, 0) # Green for other objects
|
||||
|
||||
cv2.rectangle(display_frame, (x1, y1), (x2, y2), color, 2)
|
||||
cv2.putText(display_frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
|
||||
|
||||
# Add tracker visualization if tracking is enabled
|
||||
            # Guard with getattr: this controller does not always define self.tracker
            if getattr(self, 'tracker', None) and hasattr(self, 'visualization_tracks'):
|
||||
# Draw current tracks
|
||||
for track_id, track_info in self.visualization_tracks.items():
|
||||
track_box = track_info.get('box')
|
||||
if track_box:
|
||||
x1, y1, x2, y2 = [int(c) for c in track_box]
|
||||
track_class = track_info.get('class_name', 'tracked')
|
||||
|
||||
# Draw track ID and class
|
||||
cv2.rectangle(display_frame, (x1, y1), (x2, y2), (255, 0, 255), 2)
|
||||
cv2.putText(display_frame, f"{track_class} #{track_id}",
|
||||
(x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)
|
||||
|
||||
# Draw trail if available
|
||||
trail = track_info.get('trail', [])
|
||||
if len(trail) > 1:
|
||||
for i in range(1, len(trail)):
|
||||
cv2.line(display_frame,
|
||||
(int(trail[i-1][0]), int(trail[i-1][1])),
|
||||
(int(trail[i][0]), int(trail[i][1])),
|
||||
(255, 0, 255), 2)
|
||||
|
||||
# Add FPS counter if enabled
|
||||
            if self.show_fps and metrics:  # metrics defaults to None for this method
|
||||
# Add both detection and UI FPS
|
||||
detection_fps = metrics.get('detection_fps', 0)
|
||||
ui_fps = metrics.get('ui_fps', 0)
|
||||
detection_ms = metrics.get('avg_detection_ms', 0)
|
||||
|
||||
display_frame = draw_performance_overlay(
|
||||
display_frame,
|
||||
{
|
||||
"Detection FPS": f"{detection_fps:.1f}",
|
||||
"UI FPS": f"{ui_fps:.1f}",
|
||||
"Inference": f"{detection_ms:.1f} ms"
|
||||
}
|
||||
)
|
||||
|
||||
# Save frame if recording
|
||||
if self.save_video and self.video_writer:
|
||||
self.video_writer.write(display_frame)
|
||||
|
||||
# Convert to QPixmap for display
|
||||
pixmap = enhanced_cv_to_pixmap(display_frame)
|
||||
|
||||
# Emit signals
|
||||
self.frame_ready.emit(pixmap, detections, metrics)
|
||||
self.raw_frame_ready.emit(frame, detections, metrics)
|
||||
# Emit numpy frame for compatibility with existing connections
|
||||
self.frame_np_ready.emit(frame)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error processing frame: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
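
# --- Usage sketch (illustrative only) ------------------------------------------
# Minimal wiring of EnhancedVideoController into a Qt event loop. ModelManager
# is assumed to be importable from controllers.model_manager; by default the
# controller reads from camera index 0.
if __name__ == "__main__":
    from PySide6.QtWidgets import QApplication, QLabel
    from controllers.model_manager import ModelManager  # assumed import path

    app = QApplication(sys.argv)

    controller = EnhancedVideoController(model_manager=ModelManager())

    label = QLabel("Waiting for frames...")
    label.show()

    # frame_ready carries (pixmap, detections, metrics) as emitted above;
    # only the pixmap is needed for display here.
    controller.frame_ready.connect(lambda pixmap, dets, metrics: label.setPixmap(pixmap))
    controller.stats_ready.connect(lambda stats: print(f"FPS: {stats['fps']:.1f}"))

    controller.start()  # defaults to camera index 0
    sys.exit(app.exec())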
474
qt_app_pyside1/controllers/model_manager.py
Normal file
@@ -0,0 +1,474 @@
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
|
||||
# Add parent directory to path for imports
|
||||
current_dir = Path(__file__).parent.parent.parent
|
||||
sys.path.append(str(current_dir))
|
||||
|
||||
# Import OpenVINO modules
|
||||
from detection_openvino import OpenVINOVehicleDetector
|
||||
from red_light_violation_pipeline import RedLightViolationPipeline
|
||||
|
||||
# Import from our utils package
|
||||
from utils.helpers import bbox_iou
|
||||
|
||||
class ModelManager:
|
||||
"""
|
||||
Manages OpenVINO models for traffic detection and violation monitoring.
|
||||
Only uses RedLightViolationPipeline for all violation/crosswalk/traffic light logic.
|
||||
"""
|
||||
def __init__(self, config_file: str = None, tracker=None):
|
||||
"""
|
||||
Initialize model manager with configuration.
|
||||
|
||||
Args:
|
||||
config_file: Path to JSON configuration file
|
||||
tracker: (Optional) External tracker instance (e.g., DeepSortVehicleTracker singleton)
|
||||
"""
|
||||
self.config = self._load_config(config_file)
|
||||
self.detector = None
|
||||
self.violation_pipeline = None # Use RedLightViolationPipeline only
|
||||
self.tracker = tracker
|
||||
self._initialize_models()
|
||||
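
        # Construction sketch (illustrative): a shared tracker can be injected
        # by the application, e.g.
        #     from controllers.deepsort_tracker import DeepSortVehicleTracker
        #     manager = ModelManager(config_file="config.json",
        #                            tracker=DeepSortVehicleTracker())
        # When no tracker is passed, _initialize_models() creates an internal
        # ByteTrackVehicleTracker if tracking is enabled in the config.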
|
||||
def _load_config(self, config_file: Optional[str]) -> Dict:
|
||||
"""
|
||||
Load configuration from file or use defaults.
|
||||
|
||||
Args:
|
||||
config_file: Path to JSON configuration file
|
||||
|
||||
Returns:
|
||||
Configuration dictionary
|
||||
"""
|
||||
import json
|
||||
default_config = {
|
||||
"detection": {
|
||||
"confidence_threshold": 0.3,
|
||||
"enable_ocr": True,
|
||||
"enable_tracking": True,
|
||||
"model_path": None
|
||||
},
|
||||
"violations": {
|
||||
"red_light_grace_period": 2.0,
|
||||
"stop_sign_duration": 2.0,
|
||||
"speed_tolerance": 5
|
||||
},
|
||||
"display": {
|
||||
"max_display_width": 800,
|
||||
"show_confidence": True,
|
||||
"show_labels": True,
|
||||
"show_license_plates": True
|
||||
},
|
||||
"performance": {
|
||||
"max_history_frames": 1000,
|
||||
"cleanup_interval": 3600
|
||||
}
|
||||
}
|
||||
|
||||
if config_file and os.path.exists(config_file):
|
||||
try:
|
||||
with open(config_file, 'r') as f:
|
||||
loaded_config = json.load(f)
|
||||
# Merge with defaults (preserving loaded values)
|
||||
for section in default_config:
|
||||
if section in loaded_config:
|
||||
default_config[section].update(loaded_config[section])
|
||||
except Exception as e:
|
||||
print(f"Error loading config: {e}")
|
||||
|
||||
return default_config
|
||||
|
||||
def _initialize_models(self):
|
||||
"""Initialize OpenVINO detection and violation models."""
|
||||
try:
|
||||
# Find best model path
|
||||
model_path = self.config["detection"].get("model_path")
|
||||
if not model_path or not os.path.exists(model_path):
|
||||
model_path = self._find_best_model_path()
|
||||
if not model_path:
|
||||
print("❌ No model found")
|
||||
return
|
||||
|
||||
# Initialize detector
|
||||
print(f"✅ Initializing OpenVINO detector with model: {model_path}")
|
||||
device = self.config["detection"].get("device", "AUTO")
|
||||
print(f"✅ Using inference device: {device}")
|
||||
self.detector = OpenVINOVehicleDetector(
|
||||
model_path=model_path,
|
||||
device=device,
|
||||
confidence_threshold=self.config["detection"]["confidence_threshold"]
|
||||
)
|
||||
|
||||
# Use only RedLightViolationPipeline for violation/crosswalk/traffic light logic
|
||||
self.violation_pipeline = RedLightViolationPipeline(debug=True)
|
||||
print("✅ Red light violation pipeline initialized (all other violation logic removed)")
|
||||
|
||||
# Only initialize tracker if not provided
|
||||
if self.tracker is None and self.config["detection"]["enable_tracking"]:
|
||||
try:
|
||||
from controllers.bytetrack_tracker import ByteTrackVehicleTracker
|
||||
self.tracker = ByteTrackVehicleTracker()
|
||||
print("✅ ByteTrack tracker initialized (internal)")
|
||||
except ImportError:
|
||||
print("⚠️ ByteTrack not available")
|
||||
self.tracker = None
|
||||
elif self.tracker is not None:
|
||||
print("✅ Using external DeepSORT tracker instance")
|
||||
print("✅ Models initialized successfully")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error initializing models: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
def _find_best_model_path(self, base_model_name: str = None) -> Optional[str]:
|
||||
|
||||
|
||||
if base_model_name is None:
|
||||
device = self.config["detection"].get("device", "AUTO")
|
||||
if device == "CPU" or device == "AUTO":
|
||||
# Use yolo11n for CPU - faster, lighter model
|
||||
base_model_name = "yolo11n"
|
||||
print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)")
|
||||
else:
|
||||
# Use yolo11x for GPU - larger model with better accuracy
|
||||
base_model_name = "yolo11x"
|
||||
print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)")
|
||||
|
||||
# Check if the openvino_models directory exists in the current working directory
|
||||
cwd_openvino_dir = Path.cwd() / "openvino_models"
|
||||
if cwd_openvino_dir.exists():
|
||||
direct_path = cwd_openvino_dir / f"{base_model_name}.xml"
|
||||
if direct_path.exists():
|
||||
print(f"✅ Found model directly in CWD: {direct_path}")
|
||||
return str(direct_path.absolute())
|
||||
|
||||
# Check for absolute path to openvino_models (this is the most reliable)
|
||||
absolute_openvino_dir = Path("D:/Downloads/finale6/khatam/openvino_models")
|
||||
if absolute_openvino_dir.exists():
|
||||
direct_path = absolute_openvino_dir / f"{base_model_name}.xml"
|
||||
if direct_path.exists():
|
||||
print(f"✅ Found model at absolute path: {direct_path}")
|
||||
return str(direct_path.absolute())
|
||||
|
||||
# Try relative to the model_manager.py file
|
||||
openvino_models_dir = Path(__file__).parent.parent.parent / "openvino_models"
|
||||
direct_path = openvino_models_dir / f"{base_model_name}.xml"
|
||||
if direct_path.exists():
|
||||
print(f"✅ Found model in app directory: {direct_path}")
|
||||
return str(direct_path.absolute())
|
||||
|
||||
# Check for model in folder structure within openvino_models
|
||||
subfolder_path = openvino_models_dir / f"{base_model_name}_openvino_model" / f"{base_model_name}.xml"
|
||||
if subfolder_path.exists():
|
||||
print(f"✅ Found model in subfolder: {subfolder_path}")
|
||||
return str(subfolder_path.absolute())
|
||||
|
||||
# Try other common locations
|
||||
search_dirs = [
|
||||
".",
|
||||
"..",
|
||||
"../models",
|
||||
"../rcb",
|
||||
"../openvino_models",
|
||||
f"../{base_model_name}_openvino_model",
|
||||
"../..", # Go up to project root
|
||||
"../../openvino_models", # Project root / openvino_models
|
||||
]
|
||||
|
||||
model_extensions = [
|
||||
(f"{base_model_name}.xml", "OpenVINO IR direct"),
|
||||
(f"{base_model_name}_openvino_model/{base_model_name}.xml", "OpenVINO IR"),
|
||||
(f"{base_model_name}.pt", "PyTorch"),
|
||||
]
|
||||
|
||||
for search_dir in search_dirs:
|
||||
search_path = Path(__file__).parent.parent / search_dir
|
||||
if not search_path.exists():
|
||||
continue
|
||||
|
||||
for model_file, model_type in model_extensions:
|
||||
model_path = search_path / model_file
|
||||
if model_path.exists():
|
||||
print(f"✅ Found {model_type} model: {model_path}")
|
||||
return str(model_path.absolute())
|
||||
|
||||
print(f"❌ No model found for {base_model_name}")
|
||||
return None
|
||||
|
||||
def detect(self, frame: np.ndarray) -> List[Dict]:
|
||||
"""
|
||||
Detect objects in frame.
|
||||
|
||||
Args:
|
||||
frame: Input video frame
|
||||
|
||||
Returns:
|
||||
List of detection dictionaries
|
||||
"""
|
||||
if self.detector is None:
|
||||
print("WARNING: No detector available")
|
||||
return []
|
||||
try:
|
||||
# Use a lower confidence threshold for better visibility
|
||||
base_conf_threshold = self.config["detection"].get("confidence_threshold", 0.5)
|
||||
conf_threshold = max(0.15, base_conf_threshold) # Lowered to 0.15 for traffic lights
|
||||
detections = self.detector.detect_vehicles(frame, conf_threshold=conf_threshold)
|
||||
# Try to find traffic lights with even lower confidence if none found
|
||||
traffic_light_found = any(det.get('class_name') == 'traffic light' for det in detections)
|
||||
if not traffic_light_found:
|
||||
print("⚠️ No traffic lights detected with normal confidence, trying lower threshold...")
|
||||
try:
|
||||
low_conf_detections = self.detector.detect_vehicles(frame, conf_threshold=0.05)
|
||||
for det in low_conf_detections:
|
||||
if det.get('class_name') == 'traffic light' and det not in detections:
|
||||
print(f"🚦 Adding low confidence traffic light: conf={det['confidence']:.3f}")
|
||||
detections.append(det)
|
||||
except Exception as e:
|
||||
print(f"❌ Error trying low confidence detection: {e}")
|
||||
# Enhance traffic light detection using the same utilities as qt_app_pyside
|
||||
from utils.traffic_light_utils import detect_traffic_light_color, ensure_traffic_light_color
|
||||
for det in detections:
|
||||
if det.get('class_id') == 9 or det.get('class_name') == 'traffic light':
|
||||
try:
|
||||
bbox = det['bbox']
|
||||
light_info = detect_traffic_light_color(frame, bbox)
|
||||
if light_info.get("color", "unknown") == "unknown":
|
||||
light_info = ensure_traffic_light_color(frame, bbox)
|
||||
det['traffic_light_color'] = light_info
|
||||
print(f"🚦 Enhanced Traffic Light Detection: {light_info}")
|
||||
except Exception as e:
|
||||
print(f"❌ Error in enhanced traffic light detection: {e}")
|
||||
# Ensure all detections have valid class_name and confidence
|
||||
for det in detections:
|
||||
if det.get('class_name') is None:
|
||||
det['class_name'] = 'object'
|
||||
if det.get('confidence') is None:
|
||||
det['confidence'] = 0.0
|
||||
# Add debug output
|
||||
if detections:
|
||||
print(f"DEBUG: Detected {len(detections)} objects: " + ", ".join([f"{d['class_name']} ({d['confidence']:.2f})" for d in detections[:3]]))
|
||||
# Print bounding box coordinates of first detection
|
||||
if len(detections) > 0:
|
||||
print(f"DEBUG: First detection bbox: {detections[0]['bbox']}")
|
||||
else:
|
||||
print("DEBUG: No detections in this frame")
|
||||
return detections
|
||||
except Exception as e:
|
||||
print(f"❌ Detection error: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return []
|
||||
|
||||
def update_tracking(self, detections: List[Dict], frame: np.ndarray) -> List[Dict]:
|
||||
"""
|
||||
Update tracking information for detections.
|
||||
|
||||
Args:
|
||||
detections: List of detections
|
||||
frame: Current video frame
|
||||
|
||||
Returns:
|
||||
Updated list of detections with tracking info
|
||||
"""
|
||||
if not self.tracker or not detections:
|
||||
# Fallback: assign temporary IDs if no tracker
|
||||
for idx, det in enumerate(detections):
|
||||
det['id'] = idx
|
||||
if det.get('class_name') is None:
|
||||
det['class_name'] = 'object'
|
||||
if det.get('confidence') is None:
|
||||
det['confidence'] = 0.0
|
||||
return detections
|
||||
try:
|
||||
tracker_dets = []
|
||||
det_map = [] # Keep mapping to original detection
|
||||
for det in detections:
|
||||
bbox = det['bbox']
|
||||
if len(bbox) < 4:
|
||||
continue
|
||||
x1, y1, x2, y2 = bbox
|
||||
w = x2 - x1
|
||||
h = y2 - y1
|
||||
if w <= 0 or h <= 0:
|
||||
continue
|
||||
conf = det.get('confidence', 0.0)
|
||||
class_name = det.get('class_name', 'object')
|
||||
tracker_dets.append(([x1, y1, w, h], conf, class_name))
|
||||
det_map.append(det)
|
||||
# Update tracks
|
||||
output = []
|
||||
if tracker_dets:
|
||||
tracks = self.tracker.update_tracks(tracker_dets, frame=frame)
|
||||
for i, track in enumerate(tracks):
|
||||
# FIXED: Handle both object-style tracks (with methods) and dict-style tracks
|
||||
# First check if track is confirmed (handle both dict and object styles)
|
||||
is_confirmed = True # Default to True for dict-style tracks
|
||||
if hasattr(track, 'is_confirmed') and callable(getattr(track, 'is_confirmed')):
|
||||
is_confirmed = track.is_confirmed()
|
||||
|
||||
if not is_confirmed:
|
||||
continue
|
||||
|
||||
# Get track_id (handle both dict and object styles)
|
||||
if hasattr(track, 'track_id'):
|
||||
track_id = track.track_id
|
||||
elif isinstance(track, dict) and 'id' in track:
|
||||
track_id = track['id']
|
||||
else:
|
||||
print(f"Warning: Track has no ID, skipping: {track}")
|
||||
continue
|
||||
|
||||
# Get bounding box (handle both dict and object styles)
|
||||
if hasattr(track, 'to_ltrb') and callable(getattr(track, 'to_ltrb')):
|
||||
ltrb = track.to_ltrb()
|
||||
elif isinstance(track, dict) and 'bbox' in track:
|
||||
ltrb = track['bbox'] # Assume bbox is already in [x1,y1,x2,y2] format
|
||||
else:
|
||||
print(f"Warning: Track has no bbox, skipping: {track}")
|
||||
continue
|
||||
|
||||
# Try to match track to detection by index (DeepSORT returns tracks in same order as input detections)
|
||||
# If not, fallback to previous logic
|
||||
matched_class = 'object'
|
||||
matched_conf = 0.0
|
||||
if hasattr(track, 'det_index') and track.det_index is not None and track.det_index < len(det_map):
|
||||
matched_class = det_map[track.det_index].get('class_name', 'object')
|
||||
matched_conf = det_map[track.det_index].get('confidence', 0.0)
|
||||
else:
|
||||
# Try to match by IoU if possible
|
||||
best_iou = 0
|
||||
for det in det_map:
|
||||
db = det['bbox']
|
||||
iou = self._bbox_iou([int(ltrb[0]), int(ltrb[1]), int(ltrb[2]), int(ltrb[3])], db)
|
||||
if iou > best_iou:
|
||||
best_iou = iou
|
||||
matched_class = det.get('class_name', 'object')
|
||||
matched_conf = det.get('confidence', 0.0)
|
||||
if matched_class is None:
|
||||
matched_class = 'object'
|
||||
if matched_conf is None:
|
||||
matched_conf = 0.0
|
||||
output.append({
|
||||
'bbox': [int(ltrb[0]), int(ltrb[1]), int(ltrb[2]), int(ltrb[3])],
|
||||
'class_name': matched_class,
|
||||
'confidence': matched_conf,
|
||||
'id': track_id
|
||||
})
|
||||
# Fallback: assign temp IDs if no tracks
|
||||
if not output:
|
||||
for idx, det in enumerate(detections):
|
||||
det['id'] = idx
|
||||
if det.get('class_name') is None:
|
||||
det['class_name'] = 'object'
|
||||
if det.get('confidence') is None:
|
||||
det['confidence'] = 0.0
|
||||
return detections
|
||||
return output
|
||||
except Exception as e:
|
||||
print(f"❌ Tracking error: {e}")
|
||||
# Fallback: assign temp IDs
|
||||
for idx, det in enumerate(detections):
|
||||
det['id'] = idx
|
||||
if det.get('class_name') is None:
|
||||
det['class_name'] = 'object'
|
||||
if det.get('confidence') is None:
|
||||
det['confidence'] = 0.0
|
||||
return detections
|
||||
|
||||
def update_config(self, new_config: Dict):
|
||||
"""
|
||||
Update configuration parameters.
|
||||
|
||||
Args:
|
||||
new_config: New configuration dictionary
|
||||
"""
|
||||
if not new_config:
|
||||
return
|
||||
|
||||
# Store old device setting to check if it changed
|
||||
old_device = self.config["detection"].get("device", "AUTO") if "detection" in self.config else "AUTO"
|
||||
|
||||
# Update configuration
|
||||
for section in new_config:
|
||||
if section in self.config:
|
||||
self.config[section].update(new_config[section])
|
||||
else:
|
||||
self.config[section] = new_config[section]
|
||||
|
||||
# Check if device changed - if so, we need to reinitialize models
|
||||
new_device = self.config["detection"].get("device", "AUTO")
|
||||
device_changed = old_device != new_device
|
||||
|
||||
if device_changed:
|
||||
print(f"📢 Device changed from {old_device} to {new_device}, reinitializing models...")
|
||||
# Reinitialize models with new device
|
||||
self._initialize_models()
|
||||
return
|
||||
|
||||
# Just update detector confidence threshold if device didn't change
|
||||
if self.detector:
|
||||
conf_thres = self.config["detection"].get("confidence_threshold", 0.5)
|
||||
self.detector.conf_thres = conf_thres
|
||||
|
||||
def _bbox_iou(self, boxA, boxB):
|
||||
# Compute the intersection over union of two boxes
|
||||
xA = max(boxA[0], boxB[0])
|
||||
yA = max(boxA[1], boxB[1])
|
||||
xB = min(boxA[2], boxB[2])
|
||||
yB = min(boxA[3], boxB[3])
|
||||
interArea = max(0, xB - xA) * max(0, yB - yA)
|
||||
boxAArea = max(0, boxA[2] - boxA[0]) * max(0, boxA[3] - boxA[1])
|
||||
boxBArea = max(0, boxB[2] - boxB[0]) * max(0, boxB[3] - boxB[1])
|
||||
if boxAArea + boxBArea - interArea == 0:
|
||||
return 0.0
|
||||
iou = interArea / float(boxAArea + boxBArea - interArea)
|
||||
return iou
|
||||
|
||||
def switch_model(self, target_device: str = None) -> bool:
|
||||
"""
|
||||
Manually switch to a different model based on target device.
|
||||
Args:
|
||||
target_device: Target device ("CPU", "GPU", "AUTO", etc.)
|
||||
Returns:
|
||||
True if switch was successful, False otherwise
|
||||
"""
|
||||
if target_device:
|
||||
old_device = self.config["detection"].get("device", "AUTO")
|
||||
self.config["detection"]["device"] = target_device
|
||||
print(f"🔄 Manual model switch requested: {old_device} → {target_device}")
|
||||
# If detector has a switch_model method, use it
|
||||
if hasattr(self.detector, 'switch_model'):
|
||||
try:
|
||||
success = self.detector.switch_model(device=target_device)
|
||||
if success:
|
||||
print(f"✅ Successfully switched to {target_device} optimized model")
|
||||
# If tracker needs update, reinitialize if device changed
|
||||
if old_device != target_device:
|
||||
self._initialize_models() # Optionally update tracker
|
||||
return True
|
||||
else:
|
||||
print(f"❌ Failed to switch detector to {target_device}")
|
||||
self.config["detection"]["device"] = old_device
|
||||
return False
|
||||
except Exception as e:
|
||||
print(f"❌ Failed to switch model: {e}")
|
||||
self.config["detection"]["device"] = old_device
|
||||
return False
|
||||
else:
|
||||
# Fallback: reinitialize models
|
||||
try:
|
||||
self._initialize_models()
|
||||
print(f"✅ Successfully switched to {target_device} optimized model (fallback)")
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"❌ Failed to switch model: {e}")
|
||||
self.config["detection"]["device"] = old_device
|
||||
return False
|
||||
return False
|
||||
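A minimal usage sketch of the ModelManager above, assuming it is importable from this module and that both the config file and the video path shown here are placeholders:

import cv2

manager = ModelManager(config_file="config.json")          # placeholder config path
cap = cv2.VideoCapture("traffic_sample.mp4")               # placeholder video path
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    detections = manager.detect(frame)                     # raw OpenVINO detections
    tracked = manager.update_tracking(detections, frame)   # adds a stable 'id' per object (or temp IDs without a tracker)
    for det in tracked:
        print(det['id'], det['class_name'], det['confidence'], det['bbox'])
cap.release()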
471
qt_app_pyside1/controllers/new.py
Normal file
471
qt_app_pyside1/controllers/new.py
Normal file
@@ -0,0 +1,471 @@
"""
Final Video Controller for Automatic Traffic Red-Light Violation Detection
- Uses detection_openvino.py for OpenVINO YOLOv11n detection
- Crosswalk (zebra crossing) detection using RANSAC/white-line logic
- Vehicle tracking using OpenCV trackers
- Violation logic: detects vehicles crossing the violation line on red
- Visualization and video output
"""
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))

import cv2
import numpy as np
from sklearn import linear_model


# --- Crosswalk (Zebra Crossing) Detection ---
def detect_crosswalk(frame):
    """Detect crosswalk (zebra crossing) in the frame. Returns dict with detection status and y position."""
    # White color mask
    lower = np.array([170, 170, 170])
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(frame, lower, upper)
    # Erode to remove noise
    erode_size = max(1, frame.shape[0] // 30)
    erode_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (erode_size, 1))
    eroded = cv2.erode(mask, erode_structure, anchor=(-1, -1))  # pass anchor by keyword; the third positional argument of cv2.erode is dst
    # Find contours
    contours, _ = cv2.findContours(eroded, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    left_points, right_points = [], []
    bw_width = 170
    crosswalk_y = None
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if w > bw_width:
            left_points.append([x, y])
            right_points.append([x + w, y])
    # RANSAC fit
    crosswalk_detected = False
    if len(left_points) > 5 and len(right_points) > 5:
        left_points = np.array(left_points)
        right_points = np.array(right_points)
        model_l = linear_model.RANSACRegressor().fit(left_points[:, 0:1], left_points[:, 1])
        model_r = linear_model.RANSACRegressor().fit(right_points[:, 0:1], right_points[:, 1])
        # If the lines are roughly parallel and horizontal, assume crosswalk
        slope_l = model_l.estimator_.coef_[0]
        slope_r = model_r.estimator_.coef_[0]
        if abs(slope_l) < 0.3 and abs(slope_r) < 0.3:
            crosswalk_detected = True
            crosswalk_y = int(np.median(left_points[:, 1]))
    return {'crosswalk_detected': crosswalk_detected, 'crosswalk_y': crosswalk_y}
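A small sketch of how detect_crosswalk might be called on a single frame; the video path is a placeholder and the import path assumes this file is importable as controllers.new:

import cv2
from controllers.new import detect_crosswalk  # assumed module path

cap = cv2.VideoCapture("traffic_sample.mp4")   # placeholder path
ret, frame = cap.read()
if ret:
    result = detect_crosswalk(frame)
    if result['crosswalk_detected']:
        print(f"Crosswalk found near y={result['crosswalk_y']}")
    else:
        print("No crosswalk detected in this frame")
cap.release()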

def get_traffic_light_color(frame, bbox):
    """Detect traffic light color in the given bounding box (x_min, y_min, x_max, y_max). Returns 'red', 'yellow', 'green', or 'unknown'."""
    x_min, y_min, x_max, y_max = bbox
    roi = frame[max(0, y_min):y_max, max(0, x_min):x_max]
    if roi.size == 0:
        return 'unknown'
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask_red1 = cv2.inRange(hsv, (0, 70, 50), (10, 255, 255))
    mask_red2 = cv2.inRange(hsv, (170, 70, 50), (180, 255, 255))
    mask_red = cv2.bitwise_or(mask_red1, mask_red2)
    mask_yellow = cv2.inRange(hsv, (15, 70, 50), (35, 255, 255))
    mask_green = cv2.inRange(hsv, (40, 70, 50), (90, 255, 255))
    red = np.sum(mask_red)
    yellow = np.sum(mask_yellow)
    green = np.sum(mask_green)
    if max(red, yellow, green) == 0:
        return 'unknown'
    if red >= yellow and red >= green:
        return 'red'
    elif yellow >= green:
        return 'yellow'
    else:
        return 'green'
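A quick self-contained sanity check for the HSV thresholds above, using a synthetic image rather than repository data: a solid red patch in BGR should be classified as 'red'.

import numpy as np

frame = np.zeros((100, 100, 3), dtype=np.uint8)
frame[20:60, 20:60] = (0, 0, 255)                         # pure red in BGR -> H=0, S=255, V=255
print(get_traffic_light_color(frame, (20, 20, 60, 60)))   # expected output: 'red'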

##model manager working
import os
import sys
import time
import cv2
import numpy as np
from pathlib import Path
from typing import Dict, List, Tuple, Optional

# Add parent directory to path for imports
current_dir = Path(__file__).parent.parent.parent
sys.path.append(str(current_dir))

# Import OpenVINO modules
from detection_openvino import OpenVINOVehicleDetector
from red_light_violation_pipeline import RedLightViolationPipeline

# Import from our utils package
from utils.helpers import bbox_iou

class ModelManager:
    """
    Manages OpenVINO models for traffic detection and violation monitoring.
    Only uses RedLightViolationPipeline for all violation/crosswalk/traffic light logic.
    """
    def __init__(self, config_file: str = None):
        """
        Initialize model manager with configuration.

        Args:
            config_file: Path to JSON configuration file
        """
        self.config = self._load_config(config_file)
        self.detector = None
        self.violation_pipeline = None  # Use RedLightViolationPipeline only
        self.tracker = None
        self._initialize_models()

    def _load_config(self, config_file: Optional[str]) -> Dict:
        """
        Load configuration from file or use defaults.

        Args:
            config_file: Path to JSON configuration file

        Returns:
            Configuration dictionary
        """
        import json
        default_config = {
            "detection": {
                "confidence_threshold": 0.5,
                "enable_ocr": True,
                "enable_tracking": True,
                "model_path": None
            },
            "violations": {
                "red_light_grace_period": 2.0,
                "stop_sign_duration": 2.0,
                "speed_tolerance": 5
            },
            "display": {
                "max_display_width": 800,
                "show_confidence": True,
                "show_labels": True,
                "show_license_plates": True
            },
            "performance": {
                "max_history_frames": 1000,
                "cleanup_interval": 3600
            }
        }

        if config_file and os.path.exists(config_file):
            try:
                with open(config_file, 'r') as f:
                    loaded_config = json.load(f)
                # Merge with defaults (preserving loaded values)
                for section in default_config:
                    if section in loaded_config:
                        default_config[section].update(loaded_config[section])
            except Exception as e:
                print(f"Error loading config: {e}")

        return default_config

    def _initialize_models(self):
        """Initialize OpenVINO detection and violation models."""
        try:
            # Find best model path
            model_path = self.config["detection"].get("model_path")
            if not model_path or not os.path.exists(model_path):
                model_path = self._find_best_model_path()
                if not model_path:
                    print("❌ No model found")
                    return

            # Initialize detector
            print(f"✅ Initializing OpenVINO detector with model: {model_path}")
            device = self.config["detection"].get("device", "AUTO")
            print(f"✅ Using inference device: {device}")
            self.detector = OpenVINOVehicleDetector(
                model_path=model_path,
                device=device,
                confidence_threshold=self.config["detection"]["confidence_threshold"]
            )

            # Use only RedLightViolationPipeline for violation/crosswalk/traffic light logic
            self.violation_pipeline = RedLightViolationPipeline(debug=True)
            print("✅ Red light violation pipeline initialized (all other violation logic removed)")

            # Initialize tracker if enabled
            if self.config["detection"]["enable_tracking"]:
                try:
                    from deep_sort_realtime.deepsort_tracker import DeepSort

                    # Use optimized OpenVINO embedder if available
                    use_optimized_embedder = True
                    embedder = None

                    if use_optimized_embedder:
                        try:
                            # Try importing our custom OpenVINO embedder
                            from utils.embedder_openvino import OpenVINOEmbedder
                            print(f"✅ Initializing optimized OpenVINO embedder on {device}")

                            # Set model_path explicitly to use the user-supplied model
                            script_dir = Path(__file__).parent.parent
                            model_file_path = None

                            # Try the copy version first (might be modified for compatibility)
                            copy_model_path = script_dir / "mobilenetv2 copy.xml"
                            original_model_path = script_dir / "mobilenetv2.xml"

                            if copy_model_path.exists():
                                model_file_path = str(copy_model_path)
                                print(f"✅ Using user-supplied model: {model_file_path}")
                            elif original_model_path.exists():
                                model_file_path = str(original_model_path)
                                print(f"✅ Using user-supplied model: {model_file_path}")

                            embedder = OpenVINOEmbedder(
                                model_path=model_file_path,
                                device=device,
                                half=True  # Use FP16 for better performance
                            )
                        except Exception as emb_err:
                            print(f"⚠️ OpenVINO embedder failed: {emb_err}, falling back to default")

                    # Initialize tracker with embedder based on available parameters
                    if embedder is None:
                        print("⚠️ No embedder available, using DeepSORT with default tracking")
                    else:
                        print("✅ Initializing DeepSORT with custom embedder")

                    # Simple initialization without problematic parameters
                    self.tracker = DeepSort(
                        max_age=30,
                        n_init=3,
                        nn_budget=100,
                        embedder=embedder
                    )
                    print("✅ DeepSORT tracker initialized")
                except ImportError:
                    print("⚠️ DeepSORT not available")
                    self.tracker = None
            print("✅ Models initialized successfully")

        except Exception as e:
            print(f"❌ Error initializing models: {e}")
            import traceback
            traceback.print_exc()

    def _find_best_model_path(self, base_model_name: str = None) -> Optional[str]:
        """
        Find best available model file in workspace.

        Args:
            base_model_name: Base model name without extension

        Returns:
            Path to model file or None
        """
        # Select model based on device if base_model_name is not specified
        if base_model_name is None:
            device = self.config["detection"].get("device", "AUTO")
            if device == "CPU" or device == "AUTO":
                # Use yolo11n for CPU - faster, lighter model
                base_model_name = "yolo11n"
                print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for CPU)")
            else:
                # Use yolo11x for GPU - larger model with better accuracy
                base_model_name = "yolo11x"
                print(f"🔍 Device is {device}, selecting {base_model_name} model (optimized for GPU)")

        # Check if the openvino_models directory exists in the current working directory
        cwd_openvino_dir = Path.cwd() / "openvino_models"
        if cwd_openvino_dir.exists():
            direct_path = cwd_openvino_dir / f"{base_model_name}.xml"
            if direct_path.exists():
                print(f"✅ Found model directly in CWD: {direct_path}")
                return str(direct_path.absolute())

        # Check for absolute path to openvino_models (this is the most reliable)
        absolute_openvino_dir = Path("D:/Downloads/finale6/khatam/openvino_models")
        if absolute_openvino_dir.exists():
            direct_path = absolute_openvino_dir / f"{base_model_name}.xml"
            if direct_path.exists():
                print(f"✅ Found model at absolute path: {direct_path}")
                return str(direct_path.absolute())

        # Try relative to the model_manager.py file
        openvino_models_dir = Path(__file__).parent.parent.parent / "openvino_models"
        direct_path = openvino_models_dir / f"{base_model_name}.xml"
        if direct_path.exists():
            print(f"✅ Found model in app directory: {direct_path}")
            return str(direct_path.absolute())

        # Check for model in folder structure within openvino_models
        subfolder_path = openvino_models_dir / f"{base_model_name}_openvino_model" / f"{base_model_name}.xml"
        if subfolder_path.exists():
            print(f"✅ Found model in subfolder: {subfolder_path}")
            return str(subfolder_path.absolute())

        # Try other common locations
        search_dirs = [
            ".",
            "..",
            "../models",
            "../rcb",
            "../openvino_models",
            f"../{base_model_name}_openvino_model",
            "../..",  # Go up to project root
            "../../openvino_models",  # Project root / openvino_models
        ]

        model_extensions = [
            (f"{base_model_name}.xml", "OpenVINO IR direct"),
            (f"{base_model_name}_openvino_model/{base_model_name}.xml", "OpenVINO IR"),
            (f"{base_model_name}.pt", "PyTorch"),
        ]

        for search_dir in search_dirs:
            search_path = Path(__file__).parent.parent / search_dir
            if not search_path.exists():
                continue

            for model_file, model_type in model_extensions:
                model_path = search_path / model_file
                if model_path.exists():
                    print(f"✅ Found {model_type} model: {model_path}")
                    return str(model_path.absolute())

        print(f"❌ No model found for {base_model_name}")
        return None

    def detect(self, frame: np.ndarray) -> List[Dict]:
        """
        Detect objects in frame.

        Args:
            frame: Input video frame

        Returns:
            List of detection dictionaries
        """
        if self.detector is None:
            print("WARNING: No detector available")
            return []
        try:
            # Use a lower confidence threshold for better visibility
            conf_threshold = max(0.3, self.config["detection"].get("confidence_threshold", 0.5))
            detections = self.detector.detect_vehicles(frame, conf_threshold=conf_threshold)

            # Add debug output
            if detections:
                print(f"DEBUG: Detected {len(detections)} objects: " +
                      ", ".join([f"{d['class_name']} ({d['confidence']:.2f})" for d in detections[:3]]))

                # Print bounding box coordinates of first detection
                if len(detections) > 0:
                    print(f"DEBUG: First detection bbox: {detections[0]['bbox']}")
            else:
                print("DEBUG: No detections in this frame")

            return detections
        except Exception as e:
            print(f"❌ Detection error: {e}")
            import traceback
            traceback.print_exc()
            return []

    def update_tracking(self, detections: List[Dict], frame: np.ndarray) -> List[Dict]:
        """
        Update tracking information for detections.

        Args:
            detections: List of detections
            frame: Current video frame

        Returns:
            Updated list of detections with tracking info
        """
        if not self.tracker or not detections:
            return detections

        try:
            # Format detections for DeepSORT
            tracker_dets = []
            for det in detections:
                if 'bbox' not in det:
                    continue

                bbox = det['bbox']
                if len(bbox) < 4:
                    continue

                x1, y1, x2, y2 = bbox
                w = x2 - x1
                h = y2 - y1

                if w <= 0 or h <= 0:
                    continue

                conf = det.get('confidence', 0.0)
                class_name = det.get('class_name', 'unknown')
                tracker_dets.append(([x1, y1, w, h], conf, class_name))

            # Update tracks
            if tracker_dets:
                tracks = self.tracker.update_tracks(tracker_dets, frame=frame)

                # Associate tracks with detections
                for track in tracks:
                    if not track.is_confirmed():
                        continue

                    track_id = track.track_id
                    ltrb = track.to_ltrb()

                    for det in detections:
                        if 'bbox' not in det:
                            continue

                        bbox = det['bbox']
                        if len(bbox) < 4:
                            continue

                        dx1, dy1, dx2, dy2 = bbox
                        iou = bbox_iou((dx1, dy1, dx2, dy2), tuple(map(int, ltrb)))

                        if iou > 0.5:
                            det['track_id'] = track_id
                            break
            return detections

        except Exception as e:
            print(f"❌ Tracking error: {e}")
            return detections

    def update_config(self, new_config: Dict):
        """
        Update configuration parameters.

        Args:
            new_config: New configuration dictionary
        """
        if not new_config:
            return

        # Store old device setting to check if it changed
        old_device = self.config["detection"].get("device", "AUTO") if "detection" in self.config else "AUTO"

        # Update configuration
        for section in new_config:
            if section in self.config:
                self.config[section].update(new_config[section])
            else:
                self.config[section] = new_config[section]

        # Check if device changed - if so, we need to reinitialize models
        new_device = self.config["detection"].get("device", "AUTO")
        device_changed = old_device != new_device

        if device_changed:
            print(f"📢 Device changed from {old_device} to {new_device}, reinitializing models...")
            # Reinitialize models with new device
            self._initialize_models()
            return

        # Just update detector confidence threshold if device didn't change
        if self.detector:
            conf_thres = self.config["detection"].get("confidence_threshold", 0.5)
            self.detector.conf_thres = conf_thres
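A short sketch of runtime reconfiguration with update_config, assuming a ModelManager instance named manager already exists: changing only the threshold touches the live detector, while changing the device triggers a full re-initialization (and a different YOLO variant via _find_best_model_path).

manager.update_config({"detection": {"confidence_threshold": 0.6}})   # updates detector.conf_thres only
manager.update_config({"detection": {"device": "GPU"}})               # device change -> models are reinitialized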
41
qt_app_pyside1/controllers/performance_overlay.py
Normal file
41
qt_app_pyside1/controllers/performance_overlay.py
Normal file
@@ -0,0 +1,41 @@
from PySide6.QtWidgets import QWidget, QLabel, QVBoxLayout
from PySide6.QtCore import QTimer
import psutil

class PerformanceOverlay(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowFlags(self.windowFlags() | 0x00080000)  # Qt.ToolTip
        layout = QVBoxLayout(self)
        self.cpu_label = QLabel("CPU: --%")
        self.ram_label = QLabel("RAM: --%")
        self.fps_label = QLabel("FPS: --")
        self.infer_label = QLabel("Inference: -- ms")
        layout.addWidget(self.cpu_label)
        layout.addWidget(self.ram_label)
        layout.addWidget(self.fps_label)
        layout.addWidget(self.infer_label)
        self.fps = None
        self.infer_time = None
        self.update_stats()
        # Add timer for auto-refresh
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_stats)
        self.timer.start(1000)  # Update every second

    def update_stats(self):
        self.cpu_label.setText(f"CPU: {psutil.cpu_percent()}%")
        self.ram_label.setText(f"RAM: {psutil.virtual_memory().percent}%")
        if self.fps is not None:
            self.fps_label.setText(f"FPS: {self.fps:.1f}")
        else:
            self.fps_label.setText("FPS: --")
        if self.infer_time is not None:
            self.infer_label.setText(f"Inference: {self.infer_time:.1f} ms")
        else:
            self.infer_label.setText("Inference: -- ms")

    def set_video_stats(self, fps, inference_time):
        self.fps = fps
        self.infer_time = inference_time
        self.update_stats()
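A minimal wiring sketch for the overlay, assuming a running Qt application; the FPS and inference values are placeholders that would normally come from the video controller's metrics.

from PySide6.QtWidgets import QApplication
import sys

app = QApplication(sys.argv)
overlay = PerformanceOverlay()
overlay.show()
overlay.set_video_stats(fps=27.4, inference_time=18.2)  # placeholder per-frame stats
sys.exit(app.exec())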
306
qt_app_pyside1/controllers/red_light_violation_detector.py
Normal file
306
qt_app_pyside1/controllers/red_light_violation_detector.py
Normal file
@@ -0,0 +1,306 @@
"""
Red Light Violation Detector for traffic monitoring in Qt application
"""

import cv2
import numpy as np
import time
from typing import Dict, List, Tuple, Optional, Any
from collections import deque
import datetime
import os

# Import utilities
from utils.crosswalk_utils2 import (
    detect_crosswalk_and_violation_line,
    draw_violation_line,
    check_vehicle_violation  # assumed to live with the other crosswalk helpers; process_frame() below calls it
)
# Import traffic light utilities
try:
    from utils.traffic_light_utils import detect_traffic_light_color, draw_traffic_light_status
    print("✅ Imported traffic light utilities in violation detector")
except ImportError:
    def detect_traffic_light_color(frame, bbox):
        return {"color": "unknown", "confidence": 0.0}
    def draw_traffic_light_status(frame, bbox, color):
        return frame
    print("⚠️ Failed to import traffic light utilities")

class RedLightViolationDetector:
    """
    Detect red light violations based on traffic light status and vehicle positions.

    This class integrates crosswalk/stop line detection with traffic light color
    detection to identify vehicles that cross the line during a red light.
    """

    def __init__(self):
        """Initialize the detector with default settings."""
        # Detection state
        self.violation_line_y = None
        self.detection_enabled = True
        self.detection_mode = "auto"  # "auto", "crosswalk", "stopline"

        # Track vehicles for violation detection
        self.tracked_vehicles = {}  # id -> {position_history, violation_status}
        self.violations = []

        # Store frames for snapshots/video clips
        self.violation_buffer = deque(maxlen=30)  # Store ~1 second of frames

        # Settings
        self.confidence_threshold = 0.5
        self.save_snapshots = True
        self.snapshot_dir = os.path.join(os.path.expanduser("~"), "Documents", "TrafficViolations")
        os.makedirs(self.snapshot_dir, exist_ok=True)

    def detect_violation_line(self, frame: np.ndarray, traffic_light_bbox: Optional[List[int]] = None) -> int:
        """
        Detect the violation line in the frame using crosswalk or stop line detection.

        Args:
            frame: Input video frame
            traffic_light_bbox: Optional traffic light bounding box for context

        Returns:
            Y-coordinate of the violation line
        """
        frame_height = frame.shape[0]

        try:
            # Try to detect crosswalk first if mode is auto or crosswalk
            if self.detection_mode in ["auto", "crosswalk"]:
                # Use the new function for crosswalk and violation line detection
                result_frame, crosswalk_bbox, violation_line_y, crosswalk_debug = detect_crosswalk_and_violation_line(frame)
                print(f"Crosswalk detection result: bbox={crosswalk_bbox}, vline_y={violation_line_y}")
                frame = result_frame  # Use the frame with overlays for further processing or display
                if crosswalk_bbox:
                    # Use the top of the crosswalk as the violation line
                    self.violation_line_y = crosswalk_bbox[1] - 10  # 10px before crosswalk
                    self.detection_mode = "crosswalk"  # If auto and found crosswalk, switch to crosswalk mode
                    print(f"✅ Using crosswalk for violation line at y={self.violation_line_y}")
                    return self.violation_line_y

            # If traffic light is detected, position line below it
            if traffic_light_bbox:
                x1, y1, x2, y2 = traffic_light_bbox
                # Position the line a bit below the traffic light
                proposed_y = y2 + int(frame_height * 0.15)  # 15% of frame height below traffic light
                # Don't place too low in the frame
                if proposed_y < frame_height * 0.85:
                    self.violation_line_y = proposed_y
                    print(f"✅ Using traffic light position for violation line at y={self.violation_line_y}")
                    return self.violation_line_y

            # If nothing detected, use a default position based on frame height
            self.violation_line_y = int(frame_height * 0.75)  # Default position at 75% of frame height
            print(f"ℹ️ Using default violation line at y={self.violation_line_y}")

            return self.violation_line_y

        except Exception as e:
            print(f"❌ Error in detect_violation_line: {e}")
            # Fallback
            return int(frame_height * 0.75)

    def process_frame(self, frame: np.ndarray, detections: List[Dict],
                      current_traffic_light_color: str) -> Tuple[np.ndarray, List[Dict]]:
        """
        Process a frame to detect red light violations.

        Args:
            frame: Input video frame
            detections: List of detection dictionaries with 'class_name', 'bbox', etc.
            current_traffic_light_color: Current traffic light color ('red', 'yellow', 'green', 'unknown')

        Returns:
            Tuple of (annotated frame, list of violation events)
        """
        if not self.detection_enabled:
            return frame, []

        # Store original frame for violation buffer
        self.violation_buffer.append(frame.copy())

        # Annotate frame for visualization
        annotated_frame = frame.copy()
        # Get traffic light position if available
        traffic_light_bbox = None
        for det in detections:
            # Check for both 'traffic light' and class_id 9 (COCO class for traffic light)
            if det.get('class_name') == 'traffic light' or det.get('class_id') == 9:
                traffic_light_bbox = det.get('bbox')
                print(f"Found traffic light with bbox: {traffic_light_bbox}")
                break

        # Detect violation line if not already detected
        if self.violation_line_y is None or self.violation_line_y <= 0:
            print(f"Detecting violation line with traffic light bbox: {traffic_light_bbox}")
            try:
                self.violation_line_y = self.detect_violation_line(frame, traffic_light_bbox)
                print(f"Successfully detected violation line at y={self.violation_line_y}")
            except Exception as e:
                print(f"❌ Error detecting violation line: {e}")
                # Fallback to default position
                self.violation_line_y = int(frame.shape[0] * 0.75)
                print(f"Using default violation line at y={self.violation_line_y}")

        # Draw violation line with enhanced visualization
        # Handle both string and dictionary return formats for compatibility
        if isinstance(current_traffic_light_color, dict):
            is_red = current_traffic_light_color.get("color") == "red"
            confidence = current_traffic_light_color.get("confidence", 0.0)
            confidence_text = f" (Conf: {confidence:.2f})"
            color_text = current_traffic_light_color.get("color", "unknown")
        else:
            is_red = current_traffic_light_color == "red"
            confidence_text = ""
            color_text = current_traffic_light_color

        line_color = (0, 0, 255) if is_red else (0, 255, 0)
        annotated_frame = draw_violation_line(
            annotated_frame,
            self.violation_line_y,
            line_color,
            f"VIOLATION LINE - {color_text.upper()}{confidence_text}"
        )

        # --- DEBUG: Always draw a hardcoded violation line for testing ---
        if self.violation_line_y is None or self.violation_line_y <= 0:
            frame_height = frame.shape[0]
            # Example: draw at 75% of frame height
            self.violation_line_y = int(frame_height * 0.75)
            print(f"[DEBUG] Drawing fallback violation line at y={self.violation_line_y}")
        cv2.line(annotated_frame, (0, self.violation_line_y), (frame.shape[1], self.violation_line_y), (0, 0, 255), 3)

        # Track vehicles and check for violations
        violations_this_frame = []

        # Process each detection
        for detection in detections:
            class_name = detection.get('class_name')
            confidence = detection.get('confidence', 0.0)
            bbox = detection.get('bbox')
            track_id = detection.get('track_id', -1)
            # Only process vehicles with sufficient confidence
            # Include both class_name and class_id checks for better compatibility
            is_vehicle = (class_name in ['car', 'truck', 'bus', 'motorcycle'] or
                          detection.get('class_id') in [2, 3, 5, 7])  # COCO classes for vehicles

            if (is_vehicle and
                    confidence >= self.confidence_threshold and
                    bbox is not None):
                # Use object id or generate temporary one if tracking id not available
                if track_id < 0:
                    # Generate a temporary ID based on position and size
                    x1, y1, x2, y2 = bbox
                    temp_id = f"temp_{int((x1+x2)/2)}_{int((y1+y2)/2)}_{int((x2-x1)*(y2-y1))}"
                    track_id = temp_id

                # Initialize tracking if this is a new vehicle
                if track_id not in self.tracked_vehicles:
                    print(f"🚗 New vehicle detected with ID: {track_id}")
                    self.tracked_vehicles[track_id] = {
                        'positions': deque(maxlen=30),  # Store ~1 second of positions
                        'violated': False,
                        'first_detected': time.time()
                    }

                # Update position history
                vehicle_data = self.tracked_vehicles[track_id]
                vehicle_data['positions'].append((bbox, time.time()))

                # Check for violation only if traffic light is red
                # Handle both string and dictionary return formats
                is_red = False
                if isinstance(current_traffic_light_color, dict):
                    is_red = current_traffic_light_color.get("color") == "red"
                    confidence = current_traffic_light_color.get("confidence", 0.0)
                    # Only consider red if confidence is above threshold
                    is_red = is_red and confidence >= 0.4
                else:
                    is_red = current_traffic_light_color == "red"

                if (is_red and
                        not vehicle_data['violated'] and
                        check_vehicle_violation(bbox, self.violation_line_y)):

                    # Mark as violated
                    vehicle_data['violated'] = True

                    # Create violation record with enhanced information
                    violation = {
                        'id': len(self.violations) + 1,
                        'track_id': track_id,
                        'timestamp': datetime.datetime.now(),
                        'vehicle_type': class_name,
                        'confidence': detection.get('confidence', 0.0),
                        'bbox': bbox,
                        'violation_type': 'red_light',
                        'snapshot_path': None
                    }

                    # Add traffic light information if available
                    if isinstance(current_traffic_light_color, dict):
                        violation['traffic_light'] = {
                            'color': current_traffic_light_color.get('color', 'red'),
                            'confidence': current_traffic_light_color.get('confidence', 0.0)
                        }
                    else:
                        violation['traffic_light'] = {
                            'color': current_traffic_light_color,
                            'confidence': 1.0
                        }

                    # Save snapshot if enabled
                    if self.save_snapshots:
                        snapshot_path = os.path.join(
                            self.snapshot_dir,
                            f"violation_{violation['id']}_{int(time.time())}.jpg"
                        )
                        cv2.imwrite(snapshot_path, frame)
                        violation['snapshot_path'] = snapshot_path

                    # Add to violations list
                    self.violations.append(violation)
                    violations_this_frame.append(violation)

                    # Draw violation box
                    x1, y1, x2, y2 = bbox
                    cv2.rectangle(annotated_frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
                    cv2.putText(
                        annotated_frame,
                        f"RED LIGHT VIOLATION #{violation['id']}",
                        (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.7,
                        (0, 0, 255),
                        2
                    )

        # Clean up old tracked vehicles to prevent memory leaks
        current_time = time.time()
        old_ids = [tid for tid, data in self.tracked_vehicles.items()
                   if current_time - data['first_detected'] > 30]  # Remove after 30 seconds
        for tid in old_ids:
            del self.tracked_vehicles[tid]

        return annotated_frame, violations_this_frame

    def reset(self):
        """Reset the detector state."""
        self.violation_line_y = None
        self.tracked_vehicles = {}
        # Keep violations history

    def get_violations(self) -> List[Dict]:
        """
        Get all detected violations.

        Returns:
            List of violation dictionaries
        """
        return self.violations

    def clear_violations(self):
        """Clear all violation records."""
        self.violations = []
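A per-frame usage sketch for RedLightViolationDetector, assuming frame comes from the capture loop and detections from the model manager (both are placeholders here):

detector = RedLightViolationDetector()

annotated, new_violations = detector.process_frame(
    frame,                                   # current BGR frame (assumed available)
    detections,                              # dicts with 'class_name', 'bbox', 'confidence', 'track_id'
    {"color": "red", "confidence": 0.8},     # or simply the string "red"
)
for v in new_violations:
    print(f"Violation #{v['id']} by {v['vehicle_type']} at {v['timestamp']}")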
9595
qt_app_pyside1/controllers/video_controller.py
Normal file
9595
qt_app_pyside1/controllers/video_controller.py
Normal file
File diff suppressed because it is too large
384
qt_app_pyside1/controllers/video_controller.py.new
Normal file
384
qt_app_pyside1/controllers/video_controller.py.new
Normal file
@@ -0,0 +1,384 @@
|
||||
from PySide6.QtCore import QObject, Signal, QThread, Qt, QMutex, QWaitCondition, QTimer
|
||||
from PySide6.QtGui import QImage, QPixmap
|
||||
import cv2
|
||||
import time
|
||||
import numpy as np
|
||||
from collections import deque
|
||||
from typing import Dict, List, Optional
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Add parent directory to path for imports
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
# Import utilities
|
||||
from utils.annotation_utils import (
|
||||
draw_detections,
|
||||
draw_violations,
|
||||
draw_performance_metrics,
|
||||
resize_frame_for_display,
|
||||
convert_cv_to_qimage,
|
||||
convert_cv_to_pixmap
|
||||
)
|
||||
|
||||
class VideoController(QObject):
|
||||
frame_ready = Signal(object, object, object, dict) # QPixmap, detections, violations, metrics
|
||||
raw_frame_ready = Signal(np.ndarray, list, list, float) # frame, detections, violations, fps
|
||||
|
||||
def __init__(self, model_manager=None):
|
||||
"""
|
||||
Initialize video controller.
|
||||
|
||||
Args:
|
||||
model_manager: Model manager instance for detection and violation
|
||||
"""
|
||||
super().__init__()
|
||||
self.model_manager = model_manager
|
||||
self.source = 0 # Default camera source
|
||||
self._running = False
|
||||
self.frame_count = 0
|
||||
self.start_time = 0
|
||||
self.source_fps = 0
|
||||
self.actual_fps = 0
|
||||
self.processing_times = deque(maxlen=30)
|
||||
self.cap = None # VideoCapture object
|
||||
|
||||
# Configure thread
|
||||
self.thread = QThread()
|
||||
self.moveToThread(self.thread)
|
||||
self.thread.started.connect(self._run)
|
||||
|
||||
# Performance measurement
|
||||
self.mutex = QMutex()
|
||||
self.condition = QWaitCondition()
|
||||
self.performance_metrics = {
|
||||
'FPS': 0.0,
|
||||
'Detection (ms)': 0.0,
|
||||
'Violation (ms)': 0.0,
|
||||
'Total (ms)': 0.0
|
||||
}
|
||||
|
||||
# Setup render timer
|
||||
self.render_timer = QTimer()
|
||||
self.render_timer.timeout.connect(self._process_frame)
|
||||
|
||||
# Frame buffer
|
||||
self.current_frame = None
|
||||
self.current_detections = []
|
||||
self.current_violations = []
|
||||
|
||||
# Debug counter
|
||||
self.debug_counter = 0
|
||||
|
||||
def set_source(self, source):
|
||||
"""Set video source (file path, camera index, or URL)"""
|
||||
print(f"DEBUG: VideoController.set_source called with: {source} (type: {type(source)})")
|
||||
|
||||
was_running = self._running
|
||||
if self._running:
|
||||
self.stop()
|
||||
|
||||
# Critical fix: Make sure source is properly set
|
||||
if source is None:
|
||||
print("WARNING: Received None source, defaulting to camera 0")
|
||||
self.source = 0
|
||||
elif isinstance(source, str) and source.strip():
|
||||
# Handle file paths - verify the file exists
|
||||
if os.path.exists(source):
|
||||
self.source = source
|
||||
print(f"DEBUG: VideoController source set to file: {self.source}")
|
||||
else:
|
||||
# Try to interpret as camera index or URL
|
||||
try:
|
||||
# If it's a digit string, convert to integer camera index
|
||||
if source.isdigit():
|
||||
self.source = int(source)
|
||||
print(f"DEBUG: VideoController source set to camera index: {self.source}")
|
||||
else:
|
||||
# Treat as URL or special device string
|
||||
self.source = source
|
||||
print(f"DEBUG: VideoController source set to URL/device: {self.source}")
|
||||
except ValueError:
|
||||
print(f"WARNING: Could not interpret source: {source}, defaulting to camera 0")
|
||||
self.source = 0
|
||||
elif isinstance(source, int):
|
||||
# Camera index
|
||||
self.source = source
|
||||
print(f"DEBUG: VideoController source set to camera index: {self.source}")
|
||||
else:
|
||||
print(f"WARNING: Unrecognized source type: {type(source)}, defaulting to camera 0")
|
||||
self.source = 0
|
||||
|
||||
# Get properties of the source (fps, dimensions, etc)
|
||||
self._get_source_properties()
|
||||
|
||||
if was_running:
|
||||
self.start()
|
||||
|
||||
def _get_source_properties(self):
|
||||
"""Get properties of video source"""
|
||||
try:
|
||||
cap = cv2.VideoCapture(self.source)
|
||||
if cap.isOpened():
|
||||
self.source_fps = cap.get(cv2.CAP_PROP_FPS)
|
||||
if self.source_fps <= 0:
|
||||
self.source_fps = 30.0 # Default if undetectable
|
||||
|
||||
self.frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
||||
self.frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
||||
self.frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
||||
cap.release()
|
||||
|
||||
print(f"Video source: {self.frame_width}x{self.frame_height}, {self.source_fps} FPS")
|
||||
else:
|
||||
print("Failed to open video source")
|
||||
except Exception as e:
|
||||
print(f"Error getting source properties: {e}")
|
||||
|
||||
def start(self):
|
||||
"""Start video processing"""
|
||||
if not self._running:
|
||||
self._running = True
|
||||
self.start_time = time.time()
|
||||
self.frame_count = 0
|
||||
self.debug_counter = 0
|
||||
print("DEBUG: Starting video processing thread")
|
||||
|
||||
# Start the processing thread
|
||||
if not self.thread.isRunning():
|
||||
self.thread.start()
|
||||
|
||||
# Start the render timer with a faster interval (16ms = ~60fps)
|
||||
self.render_timer.start(16)
|
||||
print("DEBUG: Render timer started")
|
||||
|
||||
def stop(self):
|
||||
"""Stop video processing"""
|
||||
if self._running:
|
||||
print("DEBUG: Stopping video processing")
|
||||
self._running = False
|
||||
self.render_timer.stop()
|
||||
|
||||
# Properly terminate the thread
|
||||
self.thread.quit()
|
||||
if not self.thread.wait(3000): # Wait 3 seconds max
|
||||
self.thread.terminate()
|
||||
print("WARNING: Thread termination forced")
|
||||
|
||||
# Close the capture if it exists
|
||||
if self.cap and self.cap.isOpened():
|
||||
self.cap.release()
|
||||
self.cap = None
|
||||
|
||||
# Clear the current frame
|
||||
self.mutex.lock()
|
||||
self.current_frame = None
|
||||
self.mutex.unlock()
|
||||
print("DEBUG: Video processing stopped")
|
||||
|
||||
def capture_snapshot(self) -> np.ndarray:
|
||||
"""Capture current frame"""
|
||||
if self.current_frame is not None:
|
||||
return self.current_frame.copy()
|
||||
return None
|
||||
|
||||
def _run(self):
|
||||
"""Main processing loop (runs in thread)"""
|
||||
try:
|
||||
# Print the source we're trying to open
|
||||
print(f"DEBUG: Opening video source: {self.source} (type: {type(self.source)})")
|
||||
|
||||
# Initialize the capture
|
||||
self.cap = None
|
||||
|
||||
# Handle different source types
|
||||
if isinstance(self.source, str) and os.path.exists(self.source):
|
||||
# It's a valid file path
|
||||
print(f"DEBUG: Opening video file: {self.source}")
|
||||
self.cap = cv2.VideoCapture(self.source)
|
||||
|
||||
# Verify file opened successfully
|
||||
if not self.cap.isOpened():
|
||||
print(f"ERROR: Could not open video file: {self.source}")
|
||||
return
|
||||
|
||||
elif isinstance(self.source, int) or (isinstance(self.source, str) and self.source.isdigit()):
|
||||
# It's a camera index
|
||||
camera_idx = int(self.source) if isinstance(self.source, str) else self.source
|
||||
print(f"DEBUG: Opening camera: {camera_idx}")
|
||||
self.cap = cv2.VideoCapture(camera_idx)
|
||||
|
||||
# Try a few times to open camera (sometimes takes a moment)
|
||||
retry_count = 0
|
||||
                while not self.cap.isOpened() and retry_count < 3:
                    print(f"Camera not ready, retrying ({retry_count+1}/3)...")
                    time.sleep(1)
                    self.cap.release()
                    self.cap = cv2.VideoCapture(camera_idx)
                    retry_count += 1

                if not self.cap.isOpened():
                    print(f"ERROR: Could not open camera {camera_idx} after {retry_count} attempts")
                    return
            else:
                # Try as a string source (URL or device path)
                print(f"DEBUG: Opening source as string: {self.source}")
                self.cap = cv2.VideoCapture(str(self.source))

                if not self.cap.isOpened():
                    print(f"ERROR: Could not open source: {self.source}")
                    return

            # Check again to ensure capture is valid
            if not self.cap or not self.cap.isOpened():
                print(f"ERROR: Could not open video source {self.source}")
                return

            # Configure frame timing based on source FPS
            frame_time = 1.0 / self.source_fps if self.source_fps > 0 else 0.033
            prev_time = time.time()

            # Log successful opening
            print(f"SUCCESS: Video source opened: {self.source}")
            print(f"Source info - FPS: {self.source_fps}, Size: {self.frame_width}x{self.frame_height}")

            # Main processing loop
            while self._running and self.cap.isOpened():
                ret, frame = self.cap.read()
                if not ret:
                    print("End of video or read error")
                    break

                # Detection and violation processing
                process_start = time.time()

                # Process detections
                detection_start = time.time()
                detections = []
                if self.model_manager:
                    detections = self.model_manager.detect(frame)
                detection_time = (time.time() - detection_start) * 1000

                # Violation detection is disabled
                violation_start = time.time()
                violations = []
                # if self.model_manager and detections:
                #     violations = self.model_manager.detect_violations(
                #         detections, frame, time.time()
                #     )
                violation_time = (time.time() - violation_start) * 1000

                # Update tracking if available
                if self.model_manager:
                    detections = self.model_manager.update_tracking(detections, frame)

                # Calculate timing metrics
                process_time = (time.time() - process_start) * 1000
                self.processing_times.append(process_time)

                # Update FPS
                now = time.time()
                self.frame_count += 1
                elapsed = now - self.start_time
                if elapsed > 0:
                    self.actual_fps = self.frame_count / elapsed

                fps_smoothed = 1.0 / (now - prev_time) if now > prev_time else 0
                prev_time = now

                # Update metrics
                self.performance_metrics = {
                    'FPS': f"{fps_smoothed:.1f}",
                    'Detection (ms)': f"{detection_time:.1f}",
                    'Violation (ms)': f"{violation_time:.1f}",
                    'Total (ms)': f"{process_time:.1f}"
                }

                # Store current frame data (thread-safe)
                self.mutex.lock()
                self.current_frame = frame.copy()
                self.current_detections = detections
                self.current_violations = violations
                self.mutex.unlock()

                # Signal for raw data subscribers
                self.raw_frame_ready.emit(frame.copy(), detections, violations, fps_smoothed)

                # Control processing rate for file sources
                if isinstance(self.source, str) and self.source_fps > 0:
                    frame_duration = time.time() - process_start
                    if frame_duration < frame_time:
                        time.sleep(frame_time - frame_duration)

            if self.cap:
                self.cap.release()
                self.cap = None

        except Exception as e:
            print(f"Video processing error: {e}")
            import traceback
            traceback.print_exc()

        finally:
            self._running = False
            if self.cap and self.cap.isOpened():
                self.cap.release()
                self.cap = None

    def _process_frame(self):
        """Process current frame for UI rendering (called by timer)"""
        if not self._running:
            return

        # Debug counter
        if hasattr(self, 'debug_counter'):
            self.debug_counter += 1
            if self.debug_counter % 30 == 0:  # Print every ~30 frames
                print(f"DEBUG: Frame processing iteration: {self.debug_counter}")

        # Get frame data safely
        self.mutex.lock()
        frame = self.current_frame.copy() if self.current_frame is not None else None
        detections = self.current_detections.copy() if hasattr(self, 'current_detections') and self.current_detections else []
        violations = self.current_violations.copy() if hasattr(self, 'current_violations') and self.current_violations else []
        metrics = self.performance_metrics.copy()
        self.mutex.unlock()

        if frame is None:
            print("DEBUG: _process_frame skipped - no frame available")
            return

        try:
            # Annotate frame
            annotated_frame = frame.copy()
            if detections:
                annotated_frame = draw_detections(annotated_frame, detections, True, True)

            # Draw metrics
            annotated_frame = draw_performance_metrics(annotated_frame, metrics)

            # Resize for display
            display_frame = resize_frame_for_display(annotated_frame)

            # Convert to QPixmap directly using a better approach
            rgb_image = cv2.cvtColor(display_frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb_image.shape
            bytes_per_line = ch * w

            # Create QImage - critical: use .copy() to ensure data stays valid
            q_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888).copy()

            # Convert to pixmap
            pixmap = QPixmap.fromImage(q_image)

            # Emit signal with processed frame
            if not pixmap.isNull():
                print(f"DEBUG: Emitting pixmap: {pixmap.width()}x{pixmap.height()}")
                self.frame_ready.emit(pixmap, detections, violations, metrics)
            else:
                print("ERROR: Created QPixmap is null")

        except Exception as e:
            print(f"ERROR in _process_frame: {e}")
            import traceback
            traceback.print_exc()
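Note: the run loop above only publishes data; rendering happens in _process_frame, which emits frame_ready(pixmap, detections, violations, metrics). Below is a minimal consumer sketch based solely on that signal signature; the VideoDisplay widget itself is hypothetical and is not part of this commit.

from PySide6.QtCore import Qt, Slot
from PySide6.QtGui import QPixmap
from PySide6.QtWidgets import QLabel

class VideoDisplay(QLabel):
    """Hypothetical sink that shows the latest annotated frame from the controller."""

    @Slot(QPixmap, list, list, dict)
    def on_frame_ready(self, pixmap, detections, violations, metrics):
        # Scale to the current label size while preserving aspect ratio
        self.setPixmap(pixmap.scaled(self.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
        self.setToolTip(f"FPS: {metrics.get('FPS', 'n/a')} | detections: {len(detections)}")

# Assumed wiring at the call site:
#   display = VideoDisplay()
#   video_thread.frame_ready.connect(display.on_frame_ready)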
3981
qt_app_pyside1/controllers/video_controller_finale.py
Normal file
File diff suppressed because it is too large
Load Diff
1673
qt_app_pyside1/controllers/video_controller_new.py
Normal file
File diff suppressed because it is too large
Load Diff
BIN
qt_app_pyside1/debug_crosswalk_group.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 194 KiB
120
qt_app_pyside1/deployed.py
Normal file
@@ -0,0 +1,120 @@
#!/usr/bin/env python3
"""
Simple Deployment Script for Traffic Monitoring System
====================================================

This script simply replaces main.py with a better version that loads main_window1.py first.

Bhai, no advanced features - just simple main.py edit!
"""

import os
import sys

def deploy_main_py():
    """Deploy simple enhanced version to main.py"""
    main_py_path = os.path.join(os.path.dirname(__file__), "main.py")
    backup_path = os.path.join(os.path.dirname(__file__), "main_backup.py")

    try:
        # Create backup of original main.py
        if os.path.exists(main_py_path):
            import shutil
            shutil.copy2(main_py_path, backup_path)
            print(f"✅ Backup created: {backup_path}")

        # Write the simple enhanced version to main.py
        enhanced_main_content = '''from PySide6.QtWidgets import QApplication
import sys
import os
import time

def main():
    # Create application instance first
    app = QApplication.instance() or QApplication(sys.argv)

    # Show splash screen if available
    splash = None
    try:
        from splash import show_splash
        splash, app = show_splash(app)
    except Exception as e:
        print(f"Could not show splash screen: {e}")

    # Add a short delay to show the splash screen
    if splash:
        time.sleep(1)

    # Try to load UI with fallback - Modern UI first!
    try:
        # Try modern UI first (main_window1.py)
        print("🔄 Attempting to load MainWindow1 (Modern UI)...")
        from ui.main_window1 import MainWindow
        print("✅ SUCCESS: Using enhanced MainWindow1 with modern UI")
    except Exception as e:
        # Fall back to standard version
        print(f"⚠️ Could not load MainWindow1: {e}")
        print("🔄 Attempting fallback to standard MainWindow...")
        try:
            from ui.main_window import MainWindow
            print("✅ Using standard MainWindow")
        except Exception as e:
            print(f"❌ Could not load any MainWindow: {e}")
            sys.exit(1)

    try:
        # Initialize main window
        window = MainWindow()

        # Close splash if it exists
        if splash:
            splash.finish(window)

        # Show main window
        window.show()

        # Start application event loop
        sys.exit(app.exec())
    except Exception as e:
        print(f"❌ Error starting application: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

if __name__ == "__main__":
    main()
'''

        with open(main_py_path, 'w', encoding='utf-8') as f:
            f.write(enhanced_main_content)

        print(f"✅ Enhanced main.py deployed successfully!")
        print(f"📝 Original main.py backed up to: {backup_path}")
        print(f"🎯 You can now run: python main.py")

        return True

    except Exception as e:
        print(f"❌ Failed to deploy main.py: {e}")
        return False

if __name__ == "__main__":
    print("🚀 Simple Traffic Monitoring System Deployment")
    print("=" * 50)
    print()
    print("This will replace main.py to load main_window1.py first (Modern UI)")
    print()

    choice = input("Deploy enhanced main.py? (y/n): ").strip().lower()

    if choice in ['y', 'yes']:
        print("\n📦 Deploying enhanced version to main.py...")
        if deploy_main_py():
            print("✅ Deployment successful!")
            print("🎯 Now run: python main.py")
        else:
            print("❌ Deployment failed!")
            sys.exit(1)
    else:
        print("\n👋 Goodbye!")
        sys.exit(0)
BIN
qt_app_pyside1/dist/FixedDebug.exe
LFS
vendored
Normal file
Binary file not shown.
BIN
qt_app_pyside1/dist/QuickDebug.exe
LFS
vendored
Normal file
Binary file not shown.
BIN
qt_app_pyside1/dist/TrafficMonitor.exe
LFS
vendored
Normal file
Binary file not shown.
23
qt_app_pyside1/docker-compose.yml
Normal file
@@ -0,0 +1,23 @@
version: "3.8"
services:
  qt_app:
    build:
      context: .
      dockerfile: Dockerfile
    image: qt-app-x11:latest
    environment:
      - DISPLAY=:99
    volumes:
      - ./logs:/app/logs
    ports:
      - "8501:8501"
    command: ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]
    healthcheck:
      test: ["CMD-SHELL", "ps aux | grep -q run_app.py"]
      interval: 30s
      timeout: 10s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 2g
130
qt_app_pyside1/enhanced_main_window.py
Normal file
@@ -0,0 +1,130 @@
"""
Patch for the MainWindow class to use EnhancedVideoController by default.
This file is imported by main.py to modify MainWindow's behavior.
"""

# Import all necessary Qt components
from PySide6.QtCore import Qt, QTimer
from PySide6.QtWidgets import QMessageBox

# Import the enhanced controller - handle potential import errors
try:
    from controllers.enhanced_video_controller import EnhancedVideoController
except ImportError:
    try:
        from qt_app_pyside.controllers.enhanced_video_controller import EnhancedVideoController
    except ImportError:
        print("⚠️ Warning: Could not import EnhancedVideoController. Using fallback controller.")
        EnhancedVideoController = None

# Original imports preserved for compatibility - handle potential import errors
try:
    from controllers.video_controller_new import VideoController
    from controllers.analytics_controller import AnalyticsController
    from controllers.performance_overlay import PerformanceOverlay
    from controllers.model_manager import ModelManager
except ImportError:
    try:
        from qt_app_pyside.controllers.video_controller_new import VideoController
        from qt_app_pyside.controllers.analytics_controller import AnalyticsController
        from qt_app_pyside.controllers.performance_overlay import PerformanceOverlay
        from qt_app_pyside.controllers.model_manager import ModelManager
    except ImportError:
        print("⚠️ Warning: Could not import controller modules.")

# Store original method reference
original_setup_controllers = None

def enhanced_setup_controllers(self):
    """Enhanced version of setupControllers that uses the EnhancedVideoController"""
    global EnhancedVideoController, ModelManager, AnalyticsController

    # If modules couldn't be imported, fall back to original method
    if EnhancedVideoController is None:
        print("⚠️ Enhanced controller not available, falling back to original setup")
        if original_setup_controllers:
            original_setup_controllers(self)
        return

    # Store existing source if video controller already exists
    existing_source = None
    if hasattr(self, 'video_controller') and self.video_controller:
        # Grab the current source before replacing the controller
        print("📽️ Preserving existing video source...")
        try:
            # Try to get source from the processing thread
            if hasattr(self.video_controller, 'processing_thread') and self.video_controller.processing_thread:
                existing_source = self.video_controller.processing_thread.source
                print(f"✅ Preserved source from processing thread: {existing_source}")
            # Backup: Get source directly from live tab
            elif hasattr(self, 'live_tab') and hasattr(self.live_tab, 'current_source'):
                existing_source = self.live_tab.current_source
                print(f"✅ Preserved source from live tab: {existing_source}")
        except Exception as e:
            print(f"⚠️ Could not preserve source: {e}")

    # Load config from file
    try:  # Initialize model manager
        self.model_manager = ModelManager(self.config_file)

        # Create enhanced video controller instead of regular one
        print("🚀 Creating enhanced video controller with async inference...")
        self.video_controller = EnhancedVideoController(self.model_manager)

        # Restore the source if we had one or check the live tab
        # First try the source we grabbed earlier
        if existing_source is not None and existing_source != 0:
            print(f"🔄 Restoring video source from previous controller: {existing_source}")
            self.video_controller.set_source(existing_source)
        # If we couldn't get it from the previous controller, try getting it from the live tab directly
        elif hasattr(self, 'live_tab') and hasattr(self.live_tab, 'current_source') and self.live_tab.current_source is not None and self.live_tab.current_source != 0:
            print(f"🔄 Using source directly from live_tab: {self.live_tab.current_source}")
            self.video_controller.set_source(self.live_tab.current_source)

        # Create analytics controller
        self.analytics_controller = AnalyticsController()

        # Setup update timer for performance overlay
        self.perf_timer = QTimer()
        self.perf_timer.timeout.connect(self.performance_overlay.update_stats)
        self.perf_timer.start(1000)  # Update every second

        # Important: Do NOT set a default source - let the UI handle it properly
        # This allows video files to be loaded and remembered

        print("✅ Enhanced controller setup complete!")

    except Exception as e:
        # Show error message
        from PySide6.QtWidgets import QMessageBox
        QMessageBox.critical(
            self,
            "Initialization Error",
            f"Error initializing enhanced controllers: {str(e)}"
        )
        print(f"❌ Error details: {e}")
        # Fall back to original method if there's an error
        if original_setup_controllers:
            print("⚠️ Falling back to original controller setup")
            original_setup_controllers(self)

# Function to patch the MainWindow class and return the patched version
def patch_mainwindow_class():
    """
    Import and patch the MainWindow class to use EnhancedVideoController by default.
    Returns the patched MainWindow class.
    """
    global original_setup_controllers

    # Import MainWindow here to avoid circular imports
    from ui.main_window import MainWindow

    # Store the original method
    original_setup_controllers = MainWindow.setupControllers

    # Replace with enhanced method
    MainWindow.setupControllers = enhanced_setup_controllers

    print("✅ MainWindow patched to use EnhancedVideoController")

    return MainWindow
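For context, a short sketch of how an entry point might consume this patch module; the exact call site is an assumption and is not part of this commit.

from PySide6.QtWidgets import QApplication
from enhanced_main_window import patch_mainwindow_class

app = QApplication([])                 # a QApplication must exist before creating windows
MainWindow = patch_mainwindow_class()  # setupControllers now builds EnhancedVideoController
window = MainWindow()
window.show()
app.exec()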
203
qt_app_pyside1/finale/UI.py
Normal file
@@ -0,0 +1,203 @@
|
||||
"""
|
||||
Finale UI - Main Entry Point
|
||||
Modern traffic monitoring interface entry point.
|
||||
"""
|
||||
|
||||
from PySide6.QtWidgets import QApplication
|
||||
from PySide6.QtCore import Qt
|
||||
from PySide6.QtGui import QFont, QPalette, QColor
|
||||
import sys
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
# Import finale components
|
||||
try:
|
||||
# Try relative imports first (when running as a package)
|
||||
from .main_window import FinaleMainWindow
|
||||
from .splash import FinaleSplashScreen
|
||||
from .styles import FinaleStyles, MaterialColors
|
||||
from .icons import FinaleIcons
|
||||
except ImportError:
|
||||
# Fallback to direct imports (when running as script)
|
||||
try:
|
||||
from main_window import FinaleMainWindow
|
||||
from splash import FinaleSplashScreen
|
||||
from styles import FinaleStyles, MaterialColors
|
||||
from icons import FinaleIcons
|
||||
except ImportError:
|
||||
print('Error importing main components')
|
||||
|
||||
# Add Qt message handler from original main.py
|
||||
def qt_message_handler(mode, context, message):
|
||||
print(f"Qt Message: {message} (Mode: {mode})")
|
||||
# Install custom handler for Qt messages
|
||||
from PySide6.QtCore import Qt
|
||||
if hasattr(Qt, 'qInstallMessageHandler'):
|
||||
Qt.qInstallMessageHandler(qt_message_handler)
|
||||
|
||||
class FinaleUI:
|
||||
"""
|
||||
Main Finale UI application class.
|
||||
Handles application initialization, theme setup, and window management.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.app = None
|
||||
self.main_window = None
|
||||
self.splash = None
|
||||
|
||||
def initialize_application(self, sys_argv=None):
|
||||
"""
|
||||
Initialize the QApplication with proper settings.
|
||||
|
||||
Args:
|
||||
sys_argv: System arguments (defaults to sys.argv)
|
||||
"""
|
||||
if sys_argv is None:
|
||||
sys_argv = sys.argv
|
||||
|
||||
# Create or get existing application instance
|
||||
self.app = QApplication.instance() or QApplication(sys_argv)
|
||||
|
||||
# Set application properties
|
||||
self.app.setApplicationName("Finale Traffic Monitoring")
|
||||
self.app.setApplicationVersion("1.0.0")
|
||||
self.app.setOrganizationName("Finale Systems")
|
||||
self.app.setOrganizationDomain("finale.traffic")
|
||||
|
||||
# Set application icon
|
||||
self.app.setWindowIcon(FinaleIcons.get_icon("traffic_monitoring"))
|
||||
|
||||
# Enable high DPI scaling
|
||||
self.app.setAttribute(Qt.AA_EnableHighDpiScaling, True)
|
||||
self.app.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
|
||||
|
||||
# Set font
|
||||
self.setup_fonts()
|
||||
|
||||
# Set global theme
|
||||
self.setup_theme()
|
||||
|
||||
return self.app
|
||||
|
||||
def setup_fonts(self):
|
||||
"""Setup application fonts"""
|
||||
# Set default font
|
||||
font = QFont("Segoe UI", 9)
|
||||
font.setHintingPreference(QFont.PreferDefaultHinting)
|
||||
self.app.setFont(font)
|
||||
|
||||
def setup_theme(self):
|
||||
"""Setup global application theme"""
|
||||
# Apply dark theme by default
|
||||
MaterialColors.apply_dark_theme()
|
||||
|
||||
# Set global stylesheet
|
||||
self.app.setStyleSheet(FinaleStyles.get_global_style())
|
||||
|
||||
def show_splash_screen(self):
|
||||
"""Show splash screen during initialization"""
|
||||
try:
|
||||
self.splash = FinaleSplashScreen()
|
||||
self.splash.show()
|
||||
|
||||
# Process events to show splash
|
||||
self.app.processEvents()
|
||||
|
||||
return self.splash
|
||||
except Exception as e:
|
||||
print(f"Could not show splash screen: {e}")
|
||||
return None
|
||||
|
||||
def create_main_window(self):
|
||||
"""Create and initialize the main window"""
|
||||
try:
|
||||
self.main_window = FinaleMainWindow()
|
||||
return self.main_window
|
||||
except Exception as e:
|
||||
print(f"Error creating main window: {e}")
|
||||
raise
|
||||
|
||||
def run(self, show_splash=True):
|
||||
"""
|
||||
Run the complete Finale UI application.
|
||||
|
||||
Args:
|
||||
show_splash: Whether to show splash screen
|
||||
|
||||
Returns:
|
||||
Application exit code
|
||||
"""
|
||||
try:
|
||||
# Initialize application
|
||||
if not self.app:
|
||||
self.initialize_application()
|
||||
|
||||
# Show splash screen
|
||||
if show_splash:
|
||||
splash = self.show_splash_screen()
|
||||
if splash:
|
||||
splash.update_progress(20, "Initializing UI components...")
|
||||
self.app.processEvents()
|
||||
|
||||
# Create main window
|
||||
if splash:
|
||||
splash.update_progress(50, "Loading detection models...")
|
||||
self.app.processEvents()
|
||||
|
||||
self.main_window = self.create_main_window()
|
||||
|
||||
if splash:
|
||||
splash.update_progress(80, "Connecting to backend...")
|
||||
self.app.processEvents()
|
||||
|
||||
# Finish splash and show main window
|
||||
if splash:
|
||||
splash.update_progress(100, "Ready!")
|
||||
self.app.processEvents()
|
||||
splash.finish(self.main_window)
|
||||
|
||||
# Show main window
|
||||
self.main_window.show()
|
||||
|
||||
# Start event loop
|
||||
return self.app.exec()
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error running Finale UI: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return 1
|
||||
|
||||
def create_finale_app(sys_argv=None):
|
||||
"""
|
||||
Create and return a Finale UI application instance.
|
||||
|
||||
Args:
|
||||
sys_argv: System arguments
|
||||
|
||||
Returns:
|
||||
FinaleUI instance
|
||||
"""
|
||||
finale_ui = FinaleUI()
|
||||
finale_ui.initialize_application(sys_argv)
|
||||
return finale_ui
|
||||
|
||||
def run_finale_ui(sys_argv=None, show_splash=True):
|
||||
"""
|
||||
Convenience function to run the Finale UI.
|
||||
|
||||
Args:
|
||||
sys_argv: System arguments
|
||||
show_splash: Whether to show splash screen
|
||||
|
||||
Returns:
|
||||
Application exit code
|
||||
"""
|
||||
finale_ui = create_finale_app(sys_argv)
|
||||
return finale_ui.run(show_splash)
|
||||
|
||||
# Main execution
|
||||
if __name__ == "__main__":
|
||||
exit_code = run_finale_ui()
|
||||
sys.exit(exit_code)
|
||||
1
qt_app_pyside1/finale/__init__.py
Normal file
@@ -0,0 +1 @@
# Finale module for traffic monitoring system
432
qt_app_pyside1/finale/icons.py
Normal file
@@ -0,0 +1,432 @@
|
||||
"""
|
||||
Icon Management System
|
||||
=====================
|
||||
|
||||
Comprehensive icon system with SVG icons, Material Design icons,
|
||||
and utility functions for the Traffic Monitoring Application.
|
||||
|
||||
Features:
|
||||
- Material Design icon set
|
||||
- SVG icon generation
|
||||
- Icon theming and colorization
|
||||
- Size variants and scaling
|
||||
- Custom icon registration
|
||||
"""
|
||||
|
||||
from PySide6.QtGui import QIcon, QPixmap, QPainter, QColor, QBrush, QPen
|
||||
from PySide6.QtCore import Qt, QSize
|
||||
from PySide6.QtSvg import QSvgRenderer
|
||||
from typing import Dict, Optional, Tuple
|
||||
import base64
|
||||
from io import BytesIO
|
||||
|
||||
class IconTheme:
|
||||
"""Icon theme management"""
|
||||
|
||||
# Icon colors for dark theme
|
||||
PRIMARY = "#FFFFFF"
|
||||
SECONDARY = "#B0B0B0"
|
||||
ACCENT = "#00BCD4"
|
||||
SUCCESS = "#4CAF50"
|
||||
WARNING = "#FF9800"
|
||||
ERROR = "#F44336"
|
||||
INFO = "#2196F3"
|
||||
|
||||
class SVGIcons:
|
||||
"""Collection of SVG icons as base64 encoded strings"""
|
||||
|
||||
# Navigation icons
|
||||
HOME = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 2l7 7v11h-4v-7h-6v7H5V9l7-7z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
PLAY = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M8 5v14l11-7z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
PAUSE = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M6 4h4v16H6V4zm8 0h4v16h-4V4z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
STOP = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M6 6h12v12H6z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
RECORD = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<circle cx="12" cy="12" r="8"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
# Detection and monitoring icons
|
||||
CAMERA = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 8.8c-2.1 0-3.8 1.7-3.8 3.8s1.7 3.8 3.8 3.8 3.8-1.7 3.8-3.8-1.7-3.8-3.8-3.8z"/>
|
||||
<path d="M21 7h-3.4l-1.9-2.6c-.4-.5-.9-.8-1.6-.8H9.9c-.7 0-1.2.3-1.6.8L6.4 7H3c-1.1 0-2 .9-2 2v10c0 1.1.9 2 2 2h18c1.1 0 2-.9 2-2V9c0-1.1-.9-2-2-2z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
MONITOR = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M21 3H3c-1.1 0-2 .9-2 2v11c0 1.1.9 2 2 2h6l-2 3v1h8v-1l-2-3h6c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm0 11H3V5h18v9z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
TRAFFIC_LIGHT = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<rect x="8" y="2" width="8" height="20" rx="4" stroke="currentColor" stroke-width="2" fill="none"/>
|
||||
<circle cx="12" cy="7" r="2" fill="#F44336"/>
|
||||
<circle cx="12" cy="12" r="2" fill="#FF9800"/>
|
||||
<circle cx="12" cy="17" r="2" fill="#4CAF50"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
VIOLATION = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 2L1 21h22L12 2zm0 3.99L19.53 19H4.47L12 5.99zM11 16h2v2h-2v-2zm0-6h2v4h-2v-4z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
# Analytics and statistics icons
|
||||
CHART_BAR = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M5 9v6h4V9H5zm6-4v10h4V5h-4zm6 6v4h4v-4h-4z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
CHART_LINE = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M3.5 18.49l6-6.01 4 4L22 6.92l-1.41-1.41-7.09 7.97-4-4L3 16.99l.5 1.5z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
CHART_PIE = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M11 2v20c-5.07-.5-9-4.79-9-10s3.93-9.5 9-10zm2.03 0v8.99H22c-.47-4.74-4.24-8.52-8.97-8.99zm0 11.01V22c4.74-.47 8.5-4.25 8.97-8.99h-8.97z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
DASHBOARD = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M3 13h8V3H3v10zm0 8h8v-6H3v6zm10 0h8V11h-8v10zm0-18v6h8V3h-8z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
# System and settings icons
|
||||
SETTINGS = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M19.14,12.94c0.04-0.3,0.06-0.61,0.06-0.94c0-0.32-0.02-0.64-0.07-0.94l2.03-1.58c0.18-0.14,0.23-0.41,0.12-0.61 l-1.92-3.32c-0.12-0.22-0.37-0.29-0.59-0.22l-2.39,0.96c-0.5-0.38-1.03-0.7-1.62-0.94L14.4,2.81c-0.04-0.24-0.24-0.41-0.48-0.41 h-3.84c-0.24,0-0.43,0.17-0.47,0.41L9.25,5.35C8.66,5.59,8.12,5.92,7.63,6.29L5.24,5.33c-0.22-0.08-0.47,0-0.59,0.22L2.74,8.87 C2.62,9.08,2.66,9.34,2.86,9.48l2.03,1.58C4.84,11.36,4.8,11.69,4.8,12s0.02,0.64,0.07,0.94l-2.03,1.58 c-0.18,0.14-0.23,0.41-0.12,0.61l1.92,3.32c0.12,0.22,0.37,0.29,0.59,0.22l2.39-0.96c0.5,0.38,1.03,0.7,1.62,0.94l0.36,2.54 c0.05,0.24,0.24,0.41,0.48,0.41h3.84c0.24,0,0.44-0.17,0.47-0.41l0.36-2.54c0.59-0.24,1.13-0.56,1.62-0.94l2.39,0.96 c0.22,0.08,0.47,0,0.59-0.22l1.92-3.32c0.12-0.22,0.07-0.47-0.12-0.61L19.14,12.94z M12,15.6c-1.98,0-3.6-1.62-3.6-3.6 s1.62-3.6,3.6-3.6s3.6,1.62,3.6,3.6S13.98,15.6,12,15.6z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
EXPORT = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 2L8 8h3v8h2V8h3l-4-6zm7 7h-2v10H7V9H5v10c0 1.1.9 2 2 2h10c1.1 0 2-.9 2-2V9z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
IMPORT = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 22L8 16h3V8h2v8h3l-4 6zm7-15h-2V5H7v2H5V5c0-1.1.9-2 2-2h10c1.1 0 2 .9 2 2v2z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
SAVE = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M17 3H5c-1.11 0-2 .9-2 2v14c0 1.1.89 2 2 2h14c1.1 0 2-.9 2-2V7l-4-4zm-5 16c-1.66 0-3-1.34-3-3s1.34-3 3-3 3 1.34 3 3-1.34 3-3 3zm3-10H5V6h10v3z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
# Status and alert icons
|
||||
CHECK_CIRCLE = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm-2 15l-5-5 1.41-1.41L10 14.17l7.59-7.59L19 8l-9 9z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
WARNING_CIRCLE = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm1 15h-2v-2h2v2zm0-4h-2V7h2v6z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
ERROR_CIRCLE = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 2C6.47 2 2 6.47 2 12s4.47 10 10 10 10-4.47 10-10S17.53 2 12 2zm5 13.59L15.59 17 12 13.41 8.41 17 7 15.59 10.59 12 7 8.41 8.41 7 12 10.59 15.59 7 17 8.41 13.41 12 17 15.59z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
INFO_CIRCLE = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm1 15h-2v-6h2v6zm0-8h-2V7h2v2z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
# Action icons
|
||||
REFRESH = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M17.65 6.35C16.2 4.9 14.21 4 12 4c-4.42 0-7.99 3.58-7.99 8s3.57 8 7.99 8c3.73 0 6.84-2.55 7.73-6h-2.08c-.82 2.33-3.04 4-5.65 4-3.31 0-6-2.69-6-6s2.69-6 6-6c1.66 0 3.14.69 4.22 1.78L13 11h7V4l-2.35 2.35z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
DELETE = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M6 19c0 1.1.9 2 2 2h8c1.1 0 2-.9 2-2V7H6v12zM19 4h-3.5l-1-1h-5l-1 1H5v2h14V4z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
EDIT = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M3 17.25V21h3.75L17.81 9.94l-3.75-3.75L3 17.25zM20.71 7.04c.39-.39.39-1.02 0-1.41l-2.34-2.34c-.39-.39-1.02-.39-1.41 0l-1.83 1.83 3.75 3.75 1.83-1.83z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
FILTER = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M10 18h4v-2h-4v2zM3 6v2h18V6H3zm3 7h12v-2H6v2z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
SEARCH = """
|
||||
<svg viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M15.5 14h-.79l-.28-.27C15.41 12.59 16 11.11 16 9.5 16 5.91 13.09 3 9.5 3S3 5.91 3 9.5 5.91 16 9.5 16c1.61 0 3.09-.59 4.23-1.57l.27.28v.79l5 4.99L20.49 19l-4.99-5zm-6 0C7.01 14 5 11.99 5 9.5S7.01 5 9.5 5 14 7.01 14 9.5 11.99 14 9.5 14z"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
class IconManager:
|
||||
"""Manages icons for the application"""
|
||||
|
||||
def __init__(self):
|
||||
self._icon_cache: Dict[str, QIcon] = {}
|
||||
self.theme = IconTheme()
|
||||
|
||||
def get_icon(self, name: str, color: str = IconTheme.PRIMARY, size: int = 24) -> QIcon:
|
||||
"""Get an icon by name with specified color and size"""
|
||||
cache_key = f"{name}_{color}_{size}"
|
||||
|
||||
if cache_key in self._icon_cache:
|
||||
return self._icon_cache[cache_key]
|
||||
|
||||
# Get SVG content
|
||||
svg_content = getattr(SVGIcons, name.upper(), None)
|
||||
if not svg_content:
|
||||
return QIcon() # Return empty icon if not found
|
||||
|
||||
# Replace currentColor with specified color
|
||||
svg_content = svg_content.replace('currentColor', color)
|
||||
|
||||
# Create icon from SVG
|
||||
icon = self._create_icon_from_svg(svg_content, size)
|
||||
self._icon_cache[cache_key] = icon
|
||||
|
||||
return icon
|
||||
|
||||
def _create_icon_from_svg(self, svg_content: str, size: int) -> QIcon:
|
||||
"""Create QIcon from SVG content"""
|
||||
# Create QSvgRenderer from SVG content
|
||||
svg_bytes = svg_content.encode('utf-8')
|
||||
renderer = QSvgRenderer(svg_bytes)
|
||||
|
||||
# Create pixmap
|
||||
pixmap = QPixmap(size, size)
|
||||
pixmap.fill(Qt.transparent)
|
||||
|
||||
# Paint SVG onto pixmap
|
||||
painter = QPainter(pixmap)
|
||||
renderer.render(painter)
|
||||
painter.end()
|
||||
|
||||
return QIcon(pixmap)
|
||||
|
||||
def get_status_icon(self, status: str, size: int = 16) -> QIcon:
|
||||
"""Get icon for specific status"""
|
||||
status_map = {
|
||||
'success': ('CHECK_CIRCLE', IconTheme.SUCCESS),
|
||||
'warning': ('WARNING_CIRCLE', IconTheme.WARNING),
|
||||
'error': ('ERROR_CIRCLE', IconTheme.ERROR),
|
||||
'info': ('INFO_CIRCLE', IconTheme.INFO),
|
||||
'violation': ('VIOLATION', IconTheme.ERROR),
|
||||
'active': ('PLAY', IconTheme.SUCCESS),
|
||||
'inactive': ('PAUSE', IconTheme.SECONDARY),
|
||||
'recording': ('RECORD', IconTheme.ERROR)
|
||||
}
|
||||
|
||||
icon_name, color = status_map.get(status, ('INFO_CIRCLE', IconTheme.INFO))
|
||||
return self.get_icon(icon_name, color, size)
|
||||
|
||||
def get_action_icon(self, action: str, size: int = 20) -> QIcon:
|
||||
"""Get icon for specific action"""
|
||||
action_map = {
|
||||
'play': 'PLAY',
|
||||
'pause': 'PAUSE',
|
||||
'stop': 'STOP',
|
||||
'record': 'RECORD',
|
||||
'settings': 'SETTINGS',
|
||||
'export': 'EXPORT',
|
||||
'import': 'IMPORT',
|
||||
'save': 'SAVE',
|
||||
'refresh': 'REFRESH',
|
||||
'delete': 'DELETE',
|
||||
'edit': 'EDIT',
|
||||
'filter': 'FILTER',
|
||||
'search': 'SEARCH'
|
||||
}
|
||||
|
||||
icon_name = action_map.get(action, 'INFO_CIRCLE')
|
||||
return self.get_icon(icon_name, IconTheme.PRIMARY, size)
|
||||
|
||||
def get_navigation_icon(self, view: str, size: int = 24) -> QIcon:
|
||||
"""Get icon for navigation views"""
|
||||
nav_map = {
|
||||
'home': 'HOME',
|
||||
'detection': 'CAMERA',
|
||||
'violations': 'VIOLATION',
|
||||
'analytics': 'DASHBOARD',
|
||||
'export': 'EXPORT',
|
||||
'monitor': 'MONITOR',
|
||||
'chart': 'CHART_BAR'
|
||||
}
|
||||
|
||||
icon_name = nav_map.get(view, 'HOME')
|
||||
return self.get_icon(icon_name, IconTheme.ACCENT, size)
|
||||
|
||||
def create_colored_icon(self, base_icon: str, color: str, size: int = 24) -> QIcon:
|
||||
"""Create a colored version of an icon"""
|
||||
return self.get_icon(base_icon, color, size)
|
||||
|
||||
def set_theme_color(self, color: str):
|
||||
"""Set the theme accent color"""
|
||||
self.theme.ACCENT = color
|
||||
# Clear cache to regenerate icons with new color
|
||||
self._icon_cache.clear()
|
||||
|
||||
# Global icon manager instance
|
||||
icon_manager = IconManager()
|
||||
|
||||
# Convenience functions
|
||||
def get_icon(name: str, color: str = IconTheme.PRIMARY, size: int = 24) -> QIcon:
|
||||
"""Get an icon - convenience function"""
|
||||
return icon_manager.get_icon(name, color, size)
|
||||
|
||||
def get_status_icon(status: str, size: int = 16) -> QIcon:
|
||||
"""Get status icon - convenience function"""
|
||||
return icon_manager.get_status_icon(status, size)
|
||||
|
||||
def get_action_icon(action: str, size: int = 20) -> QIcon:
|
||||
"""Get action icon - convenience function"""
|
||||
return icon_manager.get_action_icon(action, size)
|
||||
|
||||
def get_navigation_icon(view: str, size: int = 24) -> QIcon:
|
||||
"""Get navigation icon - convenience function"""
|
||||
return icon_manager.get_navigation_icon(view, size)
|
||||
|
||||
# Common icon sets for easy access
|
||||
class CommonIcons:
|
||||
"""Commonly used icon combinations"""
|
||||
|
||||
@staticmethod
|
||||
def toolbar_icons() -> Dict[str, QIcon]:
|
||||
"""Get all toolbar icons"""
|
||||
return {
|
||||
'play': get_action_icon('play'),
|
||||
'pause': get_action_icon('pause'),
|
||||
'stop': get_action_icon('stop'),
|
||||
'record': get_action_icon('record'),
|
||||
'settings': get_action_icon('settings'),
|
||||
'export': get_action_icon('export'),
|
||||
'refresh': get_action_icon('refresh')
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def status_icons() -> Dict[str, QIcon]:
|
||||
"""Get all status icons"""
|
||||
return {
|
||||
'success': get_status_icon('success'),
|
||||
'warning': get_status_icon('warning'),
|
||||
'error': get_status_icon('error'),
|
||||
'info': get_status_icon('info'),
|
||||
'violation': get_status_icon('violation'),
|
||||
'active': get_status_icon('active'),
|
||||
'inactive': get_status_icon('inactive'),
|
||||
'recording': get_status_icon('recording')
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def navigation_icons() -> Dict[str, QIcon]:
|
||||
"""Get all navigation icons"""
|
||||
return {
|
||||
'detection': get_navigation_icon('detection'),
|
||||
'violations': get_navigation_icon('violations'),
|
||||
'analytics': get_navigation_icon('analytics'),
|
||||
'export': get_navigation_icon('export'),
|
||||
'monitor': get_navigation_icon('monitor')
|
||||
}
|
||||
|
||||
# Traffic light specific icons
|
||||
def create_traffic_light_icon(red_on: bool = False, yellow_on: bool = False, green_on: bool = False, size: int = 32) -> QIcon:
|
||||
"""Create a traffic light icon with specific lights on/off"""
|
||||
svg_template = f"""
|
||||
<svg viewBox="0 0 24 24" width="{size}" height="{size}">
|
||||
<rect x="8" y="2" width="8" height="20" rx="4" stroke="#424242" stroke-width="2" fill="#2C2C2C"/>
|
||||
<circle cx="12" cy="7" r="2" fill="{'#F44336' if red_on else '#5D4037'}"/>
|
||||
<circle cx="12" cy="12" r="2" fill="{'#FF9800' if yellow_on else '#5D4037'}"/>
|
||||
<circle cx="12" cy="17" r="2" fill="{'#4CAF50' if green_on else '#5D4037'}"/>
|
||||
</svg>
|
||||
"""
|
||||
|
||||
svg_bytes = svg_template.encode('utf-8')
|
||||
renderer = QSvgRenderer(svg_bytes)
|
||||
|
||||
pixmap = QPixmap(size, size)
|
||||
pixmap.fill(Qt.transparent)
|
||||
|
||||
painter = QPainter(pixmap)
|
||||
renderer.render(painter)
|
||||
painter.end()
|
||||
|
||||
return QIcon(pixmap)
|
||||
|
||||
# New FinaleIcons class to wrap the existing functionality
|
||||
class FinaleIcons:
|
||||
"""
|
||||
Wrapper class for icon management to maintain compatibility
|
||||
with existing code that references FinaleIcons.get_icon() etc.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def get_icon(name: str, color: str = IconTheme.PRIMARY, size: int = 24) -> QIcon:
|
||||
"""Get an icon by name"""
|
||||
return get_icon(name, color, size)
|
||||
|
||||
@staticmethod
|
||||
def get_status_icon(status: str, size: int = 16) -> QIcon:
|
||||
"""Get a status icon"""
|
||||
return get_status_icon(status, size)
|
||||
|
||||
@staticmethod
|
||||
def get_action_icon(action: str, size: int = 20) -> QIcon:
|
||||
"""Get an action icon"""
|
||||
return get_action_icon(action, size)
|
||||
|
||||
@staticmethod
|
||||
def get_navigation_icon(view: str, size: int = 24) -> QIcon:
|
||||
"""Get a navigation icon"""
|
||||
return get_navigation_icon(view, size)
|
||||
|
||||
@staticmethod
|
||||
def create_colored_icon(base_icon: str, color: str, size: int = 24) -> QIcon:
|
||||
"""Create a colored version of an icon"""
|
||||
return get_icon(base_icon, color, size)
|
||||
|
||||
@staticmethod
|
||||
def traffic_light_icon(red_on: bool = False, yellow_on: bool = False, green_on: bool = False, size: int = 32) -> QIcon:
|
||||
"""Create a traffic light icon with specific lights on/off"""
|
||||
return create_traffic_light_icon(red_on, yellow_on, green_on, size)
|
||||
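A brief usage sketch for the icon helpers defined in icons.py above; the import path assumes the qt_app_pyside1 package root is on sys.path, and a QApplication must already exist before pixmaps are rendered.

from PySide6.QtWidgets import QApplication, QPushButton
from finale.icons import FinaleIcons, get_status_icon, create_traffic_light_icon

app = QApplication([])

play_button = QPushButton("Play")
play_button.setIcon(FinaleIcons.get_action_icon("play"))         # 20 px PLAY glyph
violation_icon = get_status_icon("violation")                    # red VIOLATION icon, 16 px
red_phase_icon = create_traffic_light_icon(red_on=True, size=32) # traffic light with red lit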
51
qt_app_pyside1/finale/main.py
Normal file
@@ -0,0 +1,51 @@
from PySide6.QtWidgets import QApplication
import sys
import os
import time

def main():
    # Create application instance first
    app = QApplication.instance() or QApplication(sys.argv)

    # Show splash screen if available
    splash = None
    try:
        from splash import show_splash
        splash, app = show_splash(app)
    except Exception as e:
        print(f"Could not show splash screen: {e}")

    # Add a short delay to show the splash screen
    if splash:
        time.sleep(1)

    try:
        # Try to use enhanced version with traffic light detection
        from ..ui.main_window import MainWindow
        print("✅ Using standard MainWindow")
    except Exception as e:
        # Fall back to standard version
        print(f"⚠️ Could not load MainWindow: {e}")
        sys.exit(1)

    try:
        # Initialize main window
        window = MainWindow()

        # Close splash if it exists
        if splash:
            splash.finish(window)

        # Show main window
        window.show()

        # Start application event loop
        sys.exit(app.exec())
    except Exception as e:
        print(f"❌ Error starting application: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

if __name__ == "__main__":
    main()
558
qt_app_pyside1/finale/main_window.py
Normal file
@@ -0,0 +1,558 @@
|
||||
"""
|
||||
Finale UI - Modern Main Window
|
||||
Advanced traffic monitoring interface with Material Design and dark theme.
|
||||
Connects to existing detection/violation logic from qt_app_pyside.
|
||||
"""
|
||||
|
||||
from PySide6.QtWidgets import (
|
||||
QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QTabWidget,
|
||||
QDockWidget, QSplitter, QFrame, QMessageBox, QApplication,
|
||||
QFileDialog, QStatusBar, QMenuBar, QMenu, QToolBar
|
||||
)
|
||||
from PySide6.QtCore import Qt, QTimer, QSettings, QSize, Signal, Slot, QPropertyAnimation, QEasingCurve
|
||||
from PySide6.QtGui import QIcon, QPixmap, QAction, QPainter, QBrush, QColor
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
|
||||
# Import finale UI components
|
||||
try:
|
||||
# Try relative imports first (when running as a package)
|
||||
from .styles import FinaleStyles, MaterialColors
|
||||
from .icons import FinaleIcons
|
||||
from .toolbar import FinaleToolbar
|
||||
from .components.stats_widgets import StatsWidget, MetricsWidget, SystemResourceWidget
|
||||
from .views import LiveView, AnalyticsView, ViolationsView, SettingsView
|
||||
except ImportError:
|
||||
# Fallback to direct imports (when running as script)
|
||||
try:
|
||||
from styles import FinaleStyles, MaterialColors
|
||||
from icons import FinaleIcons
|
||||
from toolbar import FinaleToolbar
|
||||
from components.stats_widgets import StatsWidget, MetricsWidget, SystemResourceWidget
|
||||
from views import LiveView, AnalyticsView, ViolationsView, SettingsView
|
||||
except ImportError:
|
||||
print('Error importing main window components')
|
||||
|
||||
# Import existing detection/violation logic from qt_app_pyside
|
||||
sys.path.append(str(Path(__file__).parent.parent))
|
||||
try:
|
||||
from controllers.model_manager import ModelManager
|
||||
from controllers.video_controller_new import VideoController
|
||||
from controllers.analytics_controller import AnalyticsController
|
||||
from controllers.performance_overlay import PerformanceOverlay
|
||||
# Import detection_openvino for advanced detection logic
|
||||
from detection_openvino import OpenVINOVehicleDetector
|
||||
from red_light_violation_pipeline import RedLightViolationPipeline
|
||||
from utils.helpers import load_configuration, save_configuration
|
||||
from utils.annotation_utils import draw_detections, convert_cv_to_pixmap
|
||||
from utils.enhanced_annotation_utils import enhanced_draw_detections
|
||||
from utils.traffic_light_utils import detect_traffic_light_color
|
||||
except ImportError as e:
|
||||
print(f"Warning: Could not import some dependencies: {e}")
|
||||
# Fallback imports
|
||||
from controllers.model_manager import ModelManager
|
||||
VideoController = None
|
||||
def load_configuration(path): return {}
|
||||
def save_configuration(config, path): pass
|
||||
|
||||
class FinaleMainWindow(QMainWindow):
|
||||
"""
|
||||
Modern main window for traffic monitoring with advanced UI.
|
||||
Connects to existing detection/violation logic without modifying it.
|
||||
"""
|
||||
|
||||
# Signals for UI updates
|
||||
theme_changed = Signal(bool) # dark_mode
|
||||
view_changed = Signal(str) # view_name
|
||||
fullscreen_toggled = Signal(bool)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
# Initialize settings and configuration
|
||||
self.settings = QSettings("Finale", "TrafficMonitoring")
|
||||
self.config_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), "qt_app_pyside", "config.json")
|
||||
self.config = load_configuration(self.config_file)
|
||||
|
||||
# UI state
|
||||
self.dark_mode = True
|
||||
self.current_view = "live"
|
||||
self.is_fullscreen = False
|
||||
|
||||
# Animation system
|
||||
self.animations = {}
|
||||
|
||||
# Initialize UI
|
||||
self.setup_ui()
|
||||
|
||||
# Initialize backend controllers (existing logic)
|
||||
self.setup_controllers()
|
||||
|
||||
# Connect signals
|
||||
self.connect_signals()
|
||||
|
||||
# Apply theme and restore settings
|
||||
self.apply_theme()
|
||||
self.restore_settings()
|
||||
|
||||
# Show ready message
|
||||
self.statusBar().showMessage("Finale UI Ready", 3000)
|
||||
|
||||
def setup_ui(self):
|
||||
"""Set up the modern user interface"""
|
||||
# Window properties with advanced styling
|
||||
self.setWindowTitle("Finale Traffic Monitoring System")
|
||||
self.setMinimumSize(1400, 900)
|
||||
self.resize(1600, 1000)
|
||||
|
||||
# Set window icon
|
||||
self.setWindowIcon(FinaleIcons.get_icon("traffic_monitoring"))
|
||||
|
||||
# Create central widget with modern layout
|
||||
self.setup_central_widget()
|
||||
|
||||
# Create modern toolbar
|
||||
self.setup_toolbar()
|
||||
|
||||
# Create docked widgets
|
||||
self.setup_dock_widgets()
|
||||
|
||||
# Create status bar
|
||||
self.setup_status_bar()
|
||||
|
||||
# Create menu bar
|
||||
self.setup_menu_bar()
|
||||
|
||||
# Apply initial styling
|
||||
self.setStyleSheet(FinaleStyles.get_main_window_style())
|
||||
|
||||
def setup_central_widget(self):
|
||||
"""Create the central widget with modern tabbed interface"""
|
||||
# Create main splitter for flexible layout
|
||||
self.main_splitter = QSplitter(Qt.Horizontal)
|
||||
|
||||
# Create left panel for main content
|
||||
self.content_widget = QWidget()
|
||||
self.content_layout = QVBoxLayout(self.content_widget)
|
||||
self.content_layout.setContentsMargins(0, 0, 0, 0)
|
||||
self.content_layout.setSpacing(0)
|
||||
|
||||
# Create modern tab widget
|
||||
self.tabs = QTabWidget()
|
||||
self.tabs.setTabPosition(QTabWidget.North)
|
||||
self.tabs.setMovable(True)
|
||||
self.tabs.setTabsClosable(False)
|
||||
|
||||
# Create views (these will be implemented next)
|
||||
self.live_view = LiveView()
|
||||
self.analytics_view = AnalyticsView()
|
||||
self.violations_view = ViolationsView()
|
||||
self.settings_view = SettingsView()
|
||||
|
||||
# Add tabs with icons
|
||||
self.tabs.addTab(self.live_view, FinaleIcons.get_icon("live"), "Live Detection")
|
||||
self.tabs.addTab(self.analytics_view, FinaleIcons.get_icon("analytics"), "Analytics")
|
||||
self.tabs.addTab(self.violations_view, FinaleIcons.get_icon("warning"), "Violations")
|
||||
self.tabs.addTab(self.settings_view, FinaleIcons.get_icon("settings"), "Settings")
|
||||
|
||||
# Style the tab widget
|
||||
self.tabs.setStyleSheet(FinaleStyles.get_tab_widget_style())
|
||||
|
||||
# Add to layout
|
||||
self.content_layout.addWidget(self.tabs)
|
||||
self.main_splitter.addWidget(self.content_widget)
|
||||
|
||||
# Set as central widget
|
||||
self.setCentralWidget(self.main_splitter)
|
||||
|
||||
def setup_toolbar(self):
|
||||
"""Create the modern toolbar"""
|
||||
self.toolbar = FinaleToolbar(self)
|
||||
self.addToolBar(Qt.TopToolBarArea, self.toolbar)
|
||||
|
||||
# Connect toolbar signals
|
||||
self.toolbar.play_clicked.connect(self.on_play_clicked)
|
||||
self.toolbar.pause_clicked.connect(self.on_pause_clicked)
|
||||
self.toolbar.stop_clicked.connect(self.on_stop_clicked)
|
||||
self.toolbar.record_clicked.connect(self.on_record_clicked)
|
||||
self.toolbar.snapshot_clicked.connect(self.on_snapshot_clicked)
|
||||
self.toolbar.settings_clicked.connect(self.show_settings)
|
||||
self.toolbar.fullscreen_clicked.connect(self.toggle_fullscreen)
|
||||
self.toolbar.theme_changed.connect(self.set_dark_mode)
|
||||
|
||||
def setup_dock_widgets(self):
|
||||
"""Create docked widgets for statistics and controls"""
|
||||
# Stats dock widget
|
||||
self.stats_dock = QDockWidget("Statistics", self)
|
||||
self.stats_dock.setObjectName("StatsDock")
|
||||
self.stats_widget = StatsWidget()
|
||||
self.stats_dock.setWidget(self.stats_widget)
|
||||
self.stats_dock.setFeatures(
|
||||
QDockWidget.DockWidgetMovable |
|
||||
QDockWidget.DockWidgetClosable |
|
||||
QDockWidget.DockWidgetFloatable
|
||||
)
|
||||
self.addDockWidget(Qt.RightDockWidgetArea, self.stats_dock)
|
||||
|
||||
# Metrics dock widget
|
||||
self.metrics_dock = QDockWidget("Performance", self)
|
||||
self.metrics_dock.setObjectName("MetricsDock")
|
||||
self.metrics_widget = MetricsWidget()
|
||||
self.metrics_dock.setWidget(self.metrics_widget)
|
||||
self.metrics_dock.setFeatures(
|
||||
QDockWidget.DockWidgetMovable |
|
||||
QDockWidget.DockWidgetClosable |
|
||||
QDockWidget.DockWidgetFloatable
|
||||
)
|
||||
self.addDockWidget(Qt.RightDockWidgetArea, self.metrics_dock)
|
||||
|
||||
# System resources dock widget
|
||||
self.system_dock = QDockWidget("System", self)
|
||||
self.system_dock.setObjectName("SystemDock")
|
||||
self.system_widget = SystemResourceWidget()
|
||||
self.system_dock.setWidget(self.system_widget)
|
||||
self.system_dock.setFeatures(
|
||||
QDockWidget.DockWidgetMovable |
|
||||
QDockWidget.DockWidgetClosable |
|
||||
QDockWidget.DockWidgetFloatable
|
||||
)
|
||||
self.addDockWidget(Qt.RightDockWidgetArea, self.system_dock)
|
||||
|
||||
# Tabify dock widgets for space efficiency
|
||||
self.tabifyDockWidget(self.stats_dock, self.metrics_dock)
|
||||
self.tabifyDockWidget(self.metrics_dock, self.system_dock)
|
||||
|
||||
# Show stats dock by default
|
||||
self.stats_dock.raise_()
|
||||
|
||||
# Apply dock widget styling
|
||||
for dock in [self.stats_dock, self.metrics_dock, self.system_dock]:
|
||||
dock.setStyleSheet(FinaleStyles.get_dock_widget_style())
|
||||
|
||||
def setup_status_bar(self):
|
||||
"""Create modern status bar"""
|
||||
self.status_bar = QStatusBar()
|
||||
self.setStatusBar(self.status_bar)
|
||||
|
||||
# Add permanent widgets to status bar
|
||||
self.fps_label = QWidget()
|
||||
self.connection_label = QWidget()
|
||||
self.model_label = QWidget()
|
||||
|
||||
self.status_bar.addPermanentWidget(self.fps_label)
|
||||
self.status_bar.addPermanentWidget(self.connection_label)
|
||||
self.status_bar.addPermanentWidget(self.model_label)
|
||||
|
||||
# Style status bar
|
||||
self.status_bar.setStyleSheet(FinaleStyles.get_status_bar_style())
|
||||
|
||||
def setup_menu_bar(self):
|
||||
"""Create modern menu bar"""
|
||||
self.menu_bar = self.menuBar()
|
||||
|
||||
# File menu
|
||||
file_menu = self.menu_bar.addMenu("&File")
|
||||
|
||||
open_action = QAction(FinaleIcons.get_icon("folder"), "&Open Video", self)
|
||||
open_action.setShortcut("Ctrl+O")
|
||||
open_action.triggered.connect(self.open_file)
|
||||
file_menu.addAction(open_action)
|
||||
|
||||
save_action = QAction(FinaleIcons.get_icon("save"), "&Save Config", self)
|
||||
save_action.setShortcut("Ctrl+S")
|
||||
save_action.triggered.connect(self.save_config)
|
||||
file_menu.addAction(save_action)
|
||||
|
||||
file_menu.addSeparator()
|
||||
|
||||
exit_action = QAction(FinaleIcons.get_icon("exit"), "E&xit", self)
|
||||
exit_action.setShortcut("Ctrl+Q")
|
||||
exit_action.triggered.connect(self.close)
|
||||
file_menu.addAction(exit_action)
|
||||
|
||||
# View menu
|
||||
view_menu = self.menu_bar.addMenu("&View")
|
||||
|
||||
fullscreen_action = QAction(FinaleIcons.get_icon("fullscreen"), "&Fullscreen", self)
|
||||
fullscreen_action.setShortcut("F11")
|
||||
fullscreen_action.setCheckable(True)
|
||||
fullscreen_action.triggered.connect(self.toggle_fullscreen)
|
||||
view_menu.addAction(fullscreen_action)
|
||||
|
||||
theme_action = QAction(FinaleIcons.get_icon("theme"), "&Dark Theme", self)
|
||||
theme_action.setCheckable(True)
|
||||
theme_action.setChecked(self.dark_mode)
|
||||
theme_action.triggered.connect(self.toggle_theme)
|
||||
view_menu.addAction(theme_action)
|
||||
|
||||
# Tools menu
|
||||
tools_menu = self.menu_bar.addMenu("&Tools")
|
||||
|
||||
settings_action = QAction(FinaleIcons.get_icon("settings"), "&Settings", self)
|
||||
settings_action.setShortcut("Ctrl+,")
|
||||
settings_action.triggered.connect(self.show_settings)
|
||||
tools_menu.addAction(settings_action)
|
||||
|
||||
# Help menu
|
||||
help_menu = self.menu_bar.addMenu("&Help")
|
||||
|
||||
about_action = QAction(FinaleIcons.get_icon("info"), "&About", self)
|
||||
about_action.triggered.connect(self.show_about)
|
||||
help_menu.addAction(about_action)
|
||||
|
||||
# Style menu bar
|
||||
self.menu_bar.setStyleSheet(FinaleStyles.get_menu_bar_style())
|
||||
|
||||
def setup_controllers(self):
|
||||
"""Initialize backend controllers (existing logic)"""
|
||||
try:
|
||||
# Initialize model manager (existing from qt_app_pyside)
|
||||
self.model_manager = ModelManager(self.config_file)
|
||||
|
||||
# Initialize video controller (existing from qt_app_pyside)
|
||||
self.video_controller = VideoController(self.model_manager)
|
||||
|
||||
# Initialize analytics controller (existing from qt_app_pyside)
|
||||
self.analytics_controller = AnalyticsController()
|
||||
|
||||
# Initialize performance overlay (existing from qt_app_pyside)
|
||||
self.performance_overlay = PerformanceOverlay()
|
||||
|
||||
print("✅ Backend controllers initialized successfully")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error initializing controllers: {e}")
|
||||
QMessageBox.critical(self, "Initialization Error",
|
||||
f"Failed to initialize backend controllers:\n{str(e)}")
|
||||
|
||||
def connect_signals(self):
|
||||
"""Connect signals between UI and backend"""
|
||||
try:
|
||||
# Connect video controller signals to UI updates
|
||||
if hasattr(self.video_controller, 'frame_ready'):
|
||||
self.video_controller.frame_ready.connect(self.on_frame_ready)
|
||||
|
||||
if hasattr(self.video_controller, 'stats_ready'):
|
||||
self.video_controller.stats_ready.connect(self.on_stats_ready)
|
||||
|
||||
if hasattr(self.video_controller, 'violation_detected'):
|
||||
self.video_controller.violation_detected.connect(self.on_violation_detected)
|
||||
|
||||
# Connect tab change signal
|
||||
self.tabs.currentChanged.connect(self.on_tab_changed)
|
||||
|
||||
# Connect view signals to backend
|
||||
self.live_view.source_changed.connect(self.on_source_changed)
|
||||
|
||||
print("✅ Signals connected successfully")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error connecting signals: {e}")
|
||||
|
||||
# Event handlers for UI interactions
|
||||
@Slot()
|
||||
def on_play_clicked(self):
|
||||
"""Handle play button click"""
|
||||
if hasattr(self.video_controller, 'start'):
|
||||
self.video_controller.start()
|
||||
self.toolbar.set_playback_state("playing")
|
||||
|
||||
@Slot()
|
||||
def on_pause_clicked(self):
|
||||
"""Handle pause button click"""
|
||||
if hasattr(self.video_controller, 'pause'):
|
||||
self.video_controller.pause()
|
||||
self.toolbar.set_playback_state("paused")
|
||||
|
||||
@Slot()
|
||||
def on_stop_clicked(self):
|
||||
"""Handle stop button click"""
|
||||
if hasattr(self.video_controller, 'stop'):
|
||||
self.video_controller.stop()
|
||||
self.toolbar.set_playback_state("stopped")
|
||||
|
||||
@Slot()
|
||||
def on_record_clicked(self):
|
||||
"""Handle record button click"""
|
||||
# Implementation depends on existing recording logic
|
||||
pass
|
||||
|
||||
@Slot()
|
||||
def on_snapshot_clicked(self):
|
||||
"""Handle snapshot button click"""
|
||||
# Implementation depends on existing snapshot logic
|
||||
pass
|
||||
|
||||
# Backend signal handlers
|
||||
@Slot(object, object, dict)
|
||||
def on_frame_ready(self, pixmap, detections, metrics):
|
||||
"""Handle frame ready signal from video controller"""
|
||||
# Update live view
|
||||
if self.current_view == "live":
|
||||
self.live_view.update_frame(pixmap, detections)
|
||||
|
||||
# Update toolbar status
|
||||
self.toolbar.update_status("processing", True)
|
||||
|
||||
@Slot(dict)
|
||||
def on_stats_ready(self, stats):
|
||||
"""Handle stats ready signal from video controller"""
|
||||
# Update stats widgets
|
||||
self.stats_widget.update_stats(stats)
|
||||
self.metrics_widget.update_metrics(stats)
|
||||
|
||||
# Update toolbar FPS
|
||||
if 'fps' in stats:
|
||||
self.toolbar.update_fps(stats['fps'])
|
||||
|
||||
@Slot(dict)
|
||||
def on_violation_detected(self, violation_data):
|
||||
"""Handle violation detected signal"""
|
||||
# Update violations view
|
||||
self.violations_view.add_violation(violation_data)
|
||||
|
||||
# Update toolbar status
|
||||
self.toolbar.update_status("violation", True)
|
||||
|
||||
# Play notification sound/animation if enabled
|
||||
self.play_violation_notification()
|
||||
|
||||
@Slot(str)
|
||||
def on_source_changed(self, source_path):
|
||||
"""Handle source change from live view"""
|
||||
if hasattr(self.video_controller, 'set_source'):
|
||||
self.video_controller.set_source(source_path)
|
||||
|
||||
@Slot(int)
|
||||
def on_tab_changed(self, index):
|
||||
"""Handle tab change"""
|
||||
tab_names = ["live", "analytics", "violations", "settings"]
|
||||
if 0 <= index < len(tab_names):
|
||||
self.current_view = tab_names[index]
|
||||
self.view_changed.emit(self.current_view)
|
||||
|
||||
# UI control methods
|
||||
def toggle_fullscreen(self):
|
||||
"""Toggle fullscreen mode"""
|
||||
if self.isFullScreen():
|
||||
self.showNormal()
|
||||
self.is_fullscreen = False
|
||||
else:
|
||||
self.showFullScreen()
|
||||
self.is_fullscreen = True
|
||||
|
||||
self.fullscreen_toggled.emit(self.is_fullscreen)
|
||||
|
||||
def toggle_theme(self):
|
||||
"""Toggle between dark and light theme"""
|
||||
self.set_dark_mode(not self.dark_mode)
|
||||
|
||||
def set_dark_mode(self, dark_mode):
|
||||
"""Set theme mode"""
|
||||
self.dark_mode = dark_mode
|
||||
self.apply_theme()
|
||||
self.theme_changed.emit(self.dark_mode)
|
||||
|
||||
def apply_theme(self):
|
||||
"""Apply current theme to all UI elements"""
|
||||
# Apply main styles
|
||||
self.setStyleSheet(FinaleStyles.get_main_window_style(self.dark_mode))
|
||||
|
||||
# Update all child widgets
|
||||
for child in self.findChildren(QWidget):
|
||||
if hasattr(child, 'apply_theme'):
|
||||
child.apply_theme(self.dark_mode)
|
||||
|
||||
# Update color scheme
|
||||
if self.dark_mode:
|
||||
MaterialColors.apply_dark_theme()
|
||||
else:
|
||||
MaterialColors.apply_light_theme()
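# The loop above assumes each themable child exposes an apply_theme(dark_mode) hook.
# Minimal sketch of such a child widget (hypothetical example, not part of this file):
#
#     class ThemedPanel(QWidget):
#         def apply_theme(self, dark_mode: bool):
#             # swap the stylesheet to match the requested mode
#             self.setStyleSheet(FinaleStyles.get_main_window_style(dark_mode))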
|
||||
|
||||
def show_settings(self):
|
||||
"""Show settings view"""
|
||||
self.tabs.setCurrentWidget(self.settings_view)
|
||||
|
||||
def show_about(self):
|
||||
"""Show about dialog"""
|
||||
QMessageBox.about(self, "About Finale UI",
|
||||
"Finale Traffic Monitoring System\n"
|
||||
"Modern UI for OpenVINO-based traffic detection\n"
|
||||
"Built with PySide6 and Material Design")
|
||||
|
||||
def open_file(self):
|
||||
"""Open file dialog for video source"""
|
||||
file_path, _ = QFileDialog.getOpenFileName(
|
||||
self, "Open Video File", "",
|
||||
"Video Files (*.mp4 *.avi *.mov *.mkv);;All Files (*)"
|
||||
)
|
||||
if file_path:
|
||||
self.on_source_changed(file_path)
|
||||
|
||||
def save_config(self):
|
||||
"""Save current configuration"""
|
||||
try:
|
||||
save_configuration(self.config, self.config_file)
|
||||
self.statusBar().showMessage("Configuration saved", 3000)
|
||||
except Exception as e:
|
||||
QMessageBox.warning(self, "Save Error", f"Failed to save configuration:\n{str(e)}")
|
||||
|
||||
def play_violation_notification(self):
|
||||
"""Play violation notification (visual/audio)"""
|
||||
# Create a brief red flash animation
|
||||
self.create_violation_flash()
|
||||
|
||||
def create_violation_flash(self):
|
||||
"""Create a red flash effect for violations"""
|
||||
# Create a semi-transparent red overlay
|
||||
overlay = QWidget(self)
|
||||
overlay.setStyleSheet("background-color: rgba(244, 67, 54, 0.3);")
|
||||
overlay.resize(self.size())
|
||||
overlay.show()
|
||||
|
||||
# Animate the overlay
|
||||
self.flash_animation = QPropertyAnimation(overlay, b"windowOpacity")
|
||||
self.flash_animation.setDuration(500)
|
||||
self.flash_animation.setStartValue(0.3)
|
||||
self.flash_animation.setEndValue(0.0)
|
||||
self.flash_animation.setEasingCurve(QEasingCurve.OutCubic)
|
||||
self.flash_animation.finished.connect(overlay.deleteLater)
|
||||
self.flash_animation.start()
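# Note: QWidget.windowOpacity only affects top-level windows, so animating it on a child
# overlay may not produce a visible fade. A hedged alternative sketch (assumption, not the
# original design) animates a QGraphicsOpacityEffect attached to the overlay instead:
#
#     from PySide6.QtWidgets import QGraphicsOpacityEffect
#
#     effect = QGraphicsOpacityEffect(overlay)
#     overlay.setGraphicsEffect(effect)
#     anim = QPropertyAnimation(effect, b"opacity", self)
#     anim.setDuration(500)
#     anim.setStartValue(0.3)
#     anim.setEndValue(0.0)
#     anim.setEasingCurve(QEasingCurve.OutCubic)
#     anim.finished.connect(overlay.deleteLater)
#     anim.start()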
|
||||
|
||||
# Settings persistence
|
||||
def save_settings(self):
|
||||
"""Save window settings"""
|
||||
self.settings.setValue("geometry", self.saveGeometry())
|
||||
self.settings.setValue("windowState", self.saveState())
|
||||
self.settings.setValue("dark_mode", self.dark_mode)
|
||||
self.settings.setValue("current_view", self.current_view)
|
||||
|
||||
def restore_settings(self):
|
||||
"""Restore window settings"""
|
||||
if self.settings.contains("geometry"):
|
||||
self.restoreGeometry(self.settings.value("geometry"))
|
||||
if self.settings.contains("windowState"):
|
||||
self.restoreState(self.settings.value("windowState"))
|
||||
if self.settings.contains("dark_mode"):
|
||||
self.dark_mode = self.settings.value("dark_mode", True, bool)
|
||||
if self.settings.contains("current_view"):
|
||||
view_name = self.settings.value("current_view", "live")
|
||||
view_index = {"live": 0, "analytics": 1, "violations": 2, "settings": 3}.get(view_name, 0)
|
||||
self.tabs.setCurrentIndex(view_index)
|
||||
|
||||
def closeEvent(self, event):
|
||||
"""Handle window close event"""
|
||||
# Save settings
|
||||
self.save_settings()
|
||||
|
||||
# Stop video controller
|
||||
if hasattr(self.video_controller, 'stop'):
|
||||
self.video_controller.stop()
|
||||
|
||||
# Accept close event
|
||||
event.accept()
|
||||
641
qt_app_pyside1/finale/main_window_old.py
Normal file
@@ -0,0 +1,641 @@
|
||||
from PySide6.QtWidgets import (
|
||||
QMainWindow, QTabWidget, QDockWidget, QMessageBox,
|
||||
QApplication, QFileDialog, QSplashScreen
|
||||
)
|
||||
from PySide6.QtCore import Qt, QTimer, QSettings, QSize, Slot
|
||||
from PySide6.QtGui import QIcon, QPixmap, QAction
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
|
||||
# Custom exception handler for Qt
|
||||
def qt_message_handler(mode, context, message):
|
||||
print(f"Qt Message: {message} (Mode: {mode})")
|
||||
|
||||
# Install custom handler for Qt messages (qInstallMessageHandler is a free function in
# PySide6.QtCore, not an attribute of Qt)
from PySide6.QtCore import qInstallMessageHandler
qInstallMessageHandler(qt_message_handler)
|
||||
|
||||
# Import UI components
|
||||
from ..ui.fixed_live_tab import LiveTab # Using fixed version
|
||||
from ..ui.analytics_tab import AnalyticsTab
|
||||
from ..ui.violations_tab import ViolationsTab
|
||||
from ..ui.export_tab import ExportTab
|
||||
from ..ui.config_panel import ConfigPanel
|
||||
|
||||
# Import controllers
|
||||
from ..controllers.video_controller_new import VideoController
|
||||
from ..controllers.analytics_controller import AnalyticsController
|
||||
from ..controllers.performance_overlay import PerformanceOverlay
|
||||
from ..controllers.model_manager import ModelManager
|
||||
|
||||
# Import utilities
|
||||
from ..utils.helpers import load_configuration, save_configuration, save_snapshot
|
||||
|
||||
class MainWindow(QMainWindow):
|
||||
"""Main application window."""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
# Initialize settings and configuration
|
||||
self.settings = QSettings("OpenVINO", "TrafficMonitoring")
|
||||
self.config_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), "config.json")
|
||||
self.config = load_configuration(self.config_file)
|
||||
|
||||
# Set up UI
|
||||
self.setupUI()
|
||||
|
||||
# Initialize controllers
|
||||
self.setupControllers()
|
||||
|
||||
# Connect signals and slots
|
||||
self.connectSignals()
|
||||
|
||||
# Restore settings
|
||||
self.restoreSettings()
|
||||
|
||||
# Apply theme
|
||||
self.applyTheme(True) # Start with dark theme
|
||||
|
||||
# Show ready message
|
||||
self.statusBar().showMessage("Ready")
|
||||
|
||||
def setupUI(self):
|
||||
"""Set up the user interface"""
|
||||
# Window properties
|
||||
self.setWindowTitle("Traffic Monitoring System (OpenVINO PySide6)")
|
||||
self.setMinimumSize(1200, 800)
|
||||
self.resize(1400, 900)
|
||||
|
||||
# Set up central widget with tabs
|
||||
self.tabs = QTabWidget()
|
||||
|
||||
# Create tabs
|
||||
self.live_tab = LiveTab()
|
||||
self.analytics_tab = AnalyticsTab()
|
||||
self.violations_tab = ViolationsTab()
|
||||
self.export_tab = ExportTab()
|
||||
|
||||
# Add tabs to tab widget
|
||||
self.tabs.addTab(self.live_tab, "Live Detection")
|
||||
self.tabs.addTab(self.analytics_tab, "Analytics")
|
||||
self.tabs.addTab(self.violations_tab, "Violations")
|
||||
self.tabs.addTab(self.export_tab, "Export & Config")
|
||||
|
||||
# Set central widget
|
||||
self.setCentralWidget(self.tabs)
|
||||
# Create config panel in dock widget
|
||||
self.config_panel = ConfigPanel()
|
||||
dock = QDockWidget("Settings", self)
|
||||
dock.setObjectName("SettingsDock") # Set object name to avoid warning
|
||||
dock.setWidget(self.config_panel)
|
||||
dock.setFeatures(QDockWidget.DockWidgetMovable | QDockWidget.DockWidgetClosable)
|
||||
dock.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
|
||||
self.addDockWidget(Qt.RightDockWidgetArea, dock)
|
||||
|
||||
# Create status bar
|
||||
self.statusBar().showMessage("Initializing...")
|
||||
|
||||
# Create menu bar
|
||||
self.setupMenus()
|
||||
|
||||
# Create performance overlay
|
||||
self.performance_overlay = PerformanceOverlay()
|
||||
|
||||
def setupControllers(self):
|
||||
"""Set up controllers and models"""
|
||||
# Load config from file
|
||||
try:
|
||||
# Initialize model manager
|
||||
self.model_manager = ModelManager(self.config_file)
|
||||
|
||||
# Create video controller
|
||||
self.video_controller = VideoController(self.model_manager)
|
||||
|
||||
# Create analytics controller
|
||||
self.analytics_controller = AnalyticsController()
|
||||
|
||||
# Setup update timer for performance overlay
|
||||
self.perf_timer = QTimer()
|
||||
self.perf_timer.timeout.connect(self.performance_overlay.update_stats)
|
||||
self.perf_timer.start(1000) # Update every second
|
||||
|
||||
except Exception as e:
|
||||
QMessageBox.critical(
|
||||
self,
|
||||
"Initialization Error",
|
||||
f"Error initializing controllers: {str(e)}"
|
||||
)
|
||||
print(f"Error details: {e}")
|
||||
|
||||
|
||||
def connectSignals(self):
|
||||
"""Connect signals and slots between components""" # Video controller connections - With extra debug
|
||||
print("🔌 Connecting video controller signals...")
|
||||
try:
|
||||
# Connect for UI frame updates (QPixmap-based)
|
||||
self.video_controller.frame_ready.connect(self.live_tab.update_display, Qt.QueuedConnection)
|
||||
print("✅ Connected frame_ready signal") # Connect for direct NumPy frame display (critical for live video)
|
||||
try:
|
||||
self.video_controller.frame_np_ready.connect(self.live_tab.update_display_np, Qt.QueuedConnection)
|
||||
print("✅ Connected frame_np_ready signal")
|
||||
# PySide6 doesn't have isConnected method, so let's just confirm the connection works
|
||||
print("🔌 frame_np_ready connection should be established")
|
||||
except Exception as e:
|
||||
print(f"❌ Error connecting frame_np_ready signal: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
# Connect stats signal
|
||||
self.video_controller.stats_ready.connect(self.live_tab.update_stats, Qt.QueuedConnection)
|
||||
# Also connect stats signal to update traffic light status in main window
|
||||
self.video_controller.stats_ready.connect(self.update_traffic_light_status, Qt.QueuedConnection)
|
||||
print("✅ Connected stats_ready signals")
|
||||
# Connect raw frame data for analytics
|
||||
self.video_controller.raw_frame_ready.connect(self.analytics_controller.process_frame_data)
|
||||
print("✅ Connected raw_frame_ready signal")
|
||||
|
||||
# (stats_ready -> update_traffic_light_status is already connected above; a second
# connect() would invoke the slot twice per emission, so it is not repeated here.)
|
||||
|
||||
# Connect violation detection signal
|
||||
try:
|
||||
self.video_controller.violation_detected.connect(self.handle_violation_detected, Qt.QueuedConnection)
|
||||
print("✅ Connected violation_detected signal")
|
||||
except Exception as e:
|
||||
print(f"⚠️ Could not connect violation signal: {e}")
|
||||
except Exception as e:
|
||||
print(f"❌ Error connecting signals: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
# Live tab connections
|
||||
self.live_tab.source_changed.connect(self.video_controller.set_source)
|
||||
self.live_tab.video_dropped.connect(self.video_controller.set_source)
|
||||
self.live_tab.snapshot_requested.connect(self.take_snapshot)
|
||||
self.live_tab.run_requested.connect(self.toggle_video_processing)
|
||||
|
||||
# Config panel connections
|
||||
self.config_panel.config_changed.connect(self.apply_config)
|
||||
self.config_panel.theme_toggled.connect(self.applyTheme)
|
||||
|
||||
# Analytics controller connections
|
||||
self.analytics_controller.analytics_updated.connect(self.analytics_tab.update_analytics)
|
||||
self.analytics_controller.analytics_updated.connect(self.export_tab.update_export_preview)
|
||||
|
||||
# Tab-specific connections
|
||||
self.violations_tab.clear_btn.clicked.connect(self.analytics_controller.clear_statistics)
|
||||
self.export_tab.reset_btn.clicked.connect(self.config_panel.reset_config)
|
||||
self.export_tab.save_config_btn.clicked.connect(self.save_config)
|
||||
self.export_tab.reload_config_btn.clicked.connect(self.load_config)
|
||||
self.export_tab.export_btn.clicked.connect(self.export_data)
|
||||
|
||||
def setupMenus(self):
|
||||
"""Set up application menus"""
|
||||
# File menu
|
||||
file_menu = self.menuBar().addMenu("&File")
|
||||
|
||||
open_action = QAction("&Open Video...", self)
|
||||
open_action.setShortcut("Ctrl+O")
|
||||
open_action.triggered.connect(self.open_video_file)
|
||||
file_menu.addAction(open_action)
|
||||
|
||||
file_menu.addSeparator()
|
||||
|
||||
snapshot_action = QAction("Take &Snapshot", self)
|
||||
snapshot_action.setShortcut("Ctrl+S")
|
||||
snapshot_action.triggered.connect(self.take_snapshot)
|
||||
file_menu.addAction(snapshot_action)
|
||||
|
||||
file_menu.addSeparator()
|
||||
|
||||
exit_action = QAction("E&xit", self)
|
||||
exit_action.setShortcut("Alt+F4")
|
||||
exit_action.triggered.connect(self.close)
|
||||
file_menu.addAction(exit_action)
|
||||
|
||||
# View menu
|
||||
view_menu = self.menuBar().addMenu("&View")
|
||||
|
||||
toggle_config_action = QAction("Show/Hide &Settings Panel", self)
|
||||
toggle_config_action.setShortcut("F4")
|
||||
toggle_config_action.triggered.connect(self.toggle_config_panel)
|
||||
view_menu.addAction(toggle_config_action)
|
||||
|
||||
toggle_perf_action = QAction("Show/Hide &Performance Overlay", self)
|
||||
toggle_perf_action.setShortcut("F5")
|
||||
toggle_perf_action.triggered.connect(self.toggle_performance_overlay)
|
||||
view_menu.addAction(toggle_perf_action)
|
||||
|
||||
# Help menu
|
||||
help_menu = self.menuBar().addMenu("&Help")
|
||||
|
||||
about_action = QAction("&About", self)
|
||||
about_action.triggered.connect(self.show_about_dialog)
|
||||
help_menu.addAction(about_action)
|
||||
|
||||
@Slot(dict)
|
||||
def apply_config(self, config):
|
||||
"""
|
||||
Apply configuration changes.
|
||||
|
||||
Args:
|
||||
config: Configuration dictionary
|
||||
"""
|
||||
# Update configuration
|
||||
if not config:
|
||||
return
|
||||
|
||||
# Update config
|
||||
for section in config:
|
||||
if section in self.config:
|
||||
self.config[section].update(config[section])
|
||||
else:
|
||||
self.config[section] = config[section]
|
||||
|
||||
# Update model manager
|
||||
if self.model_manager:
|
||||
self.model_manager.update_config(self.config)
|
||||
|
||||
# Save config to file
|
||||
save_configuration(self.config, self.config_file)
|
||||
|
||||
# Update export tab
|
||||
self.export_tab.update_config_display(self.config)
|
||||
|
||||
# Update status
|
||||
self.statusBar().showMessage("Configuration applied", 2000)
|
||||
|
||||
@Slot()
|
||||
def load_config(self):
|
||||
"""Load configuration from file"""
|
||||
# Ask for confirmation if needed
|
||||
if self.video_controller and self.video_controller._running:
|
||||
reply = QMessageBox.question(
|
||||
self,
|
||||
"Reload Configuration",
|
||||
"Reloading configuration will stop current processing. Continue?",
|
||||
QMessageBox.Yes | QMessageBox.No,
|
||||
QMessageBox.No
|
||||
)
|
||||
|
||||
if reply == QMessageBox.No:
|
||||
return
|
||||
|
||||
# Stop processing
|
||||
self.video_controller.stop()
|
||||
|
||||
# Load config
|
||||
self.config = load_configuration(self.config_file)
|
||||
|
||||
# Update UI
|
||||
self.config_panel.set_config(self.config)
|
||||
self.export_tab.update_config_display(self.config)
|
||||
|
||||
# Update model manager
|
||||
if self.model_manager:
|
||||
self.model_manager.update_config(self.config)
|
||||
|
||||
# Update status
|
||||
self.statusBar().showMessage("Configuration loaded", 2000)
|
||||
|
||||
@Slot()
|
||||
def save_config(self):
|
||||
"""Save configuration to file"""
|
||||
# Get config from UI
|
||||
ui_config = self.export_tab.get_config_from_ui()
|
||||
|
||||
# Update config
|
||||
for section in ui_config:
|
||||
if section in self.config:
|
||||
self.config[section].update(ui_config[section])
|
||||
else:
|
||||
self.config[section] = ui_config[section]
|
||||
|
||||
# Save to file
|
||||
if save_configuration(self.config, self.config_file):
|
||||
self.statusBar().showMessage("Configuration saved", 2000)
|
||||
else:
|
||||
self.statusBar().showMessage("Error saving configuration", 2000)
|
||||
|
||||
# Update model manager
|
||||
if self.model_manager:
|
||||
self.model_manager.update_config(self.config)
|
||||
|
||||
@Slot()
|
||||
def open_video_file(self):
|
||||
"""Open video file dialog"""
|
||||
file_path, _ = QFileDialog.getOpenFileName(
|
||||
self,
|
||||
"Open Video File",
|
||||
"",
|
||||
"Video Files (*.mp4 *.avi *.mov *.mkv *.webm);;All Files (*)"
|
||||
)
|
||||
|
||||
if file_path:
|
||||
# Update live tab
|
||||
self.live_tab.source_changed.emit(file_path)
|
||||
|
||||
# Update status
|
||||
self.statusBar().showMessage(f"Loaded video: {os.path.basename(file_path)}")
|
||||
|
||||
@Slot()
|
||||
def take_snapshot(self):
|
||||
"""Take snapshot of current frame"""
|
||||
if self.video_controller:
|
||||
# Get current frame
|
||||
frame = self.video_controller.capture_snapshot()
|
||||
|
||||
if frame is not None:
|
||||
# Save frame to file
|
||||
save_dir = self.settings.value("snapshot_dir", ".")
|
||||
file_path = os.path.join(save_dir, "snapshot_" +
|
||||
str(int(time.time())) + ".jpg")
|
||||
|
||||
saved_path = save_snapshot(frame, file_path)
|
||||
|
||||
if saved_path:
|
||||
self.statusBar().showMessage(f"Snapshot saved: {saved_path}", 3000)
|
||||
else:
|
||||
self.statusBar().showMessage("Error saving snapshot", 3000)
|
||||
else:
|
||||
self.statusBar().showMessage("No frame to capture", 3000)
|
||||
|
||||
@Slot()
|
||||
def toggle_config_panel(self):
|
||||
"""Toggle configuration panel visibility"""
|
||||
dock_widgets = self.findChildren(QDockWidget)
|
||||
for dock in dock_widgets:
|
||||
dock.setVisible(not dock.isVisible())
|
||||
|
||||
@Slot()
|
||||
def toggle_performance_overlay(self):
|
||||
"""Toggle performance overlay visibility"""
|
||||
if self.performance_overlay.isVisible():
|
||||
self.performance_overlay.hide()
|
||||
else:
|
||||
# Position in the corner
|
||||
self.performance_overlay.move(self.pos().x() + 10, self.pos().y() + 30)
|
||||
self.performance_overlay.show()
|
||||
|
||||
@Slot(bool)
|
||||
def applyTheme(self, dark_theme):
|
||||
"""
|
||||
Apply light or dark theme.
|
||||
|
||||
Args:
|
||||
dark_theme: True for dark theme, False for light theme
|
||||
"""
|
||||
if dark_theme:
|
||||
# Load dark theme stylesheet
|
||||
theme_file = os.path.join(
|
||||
os.path.dirname(os.path.dirname(__file__)),
|
||||
"resources", "themes", "dark.qss"
|
||||
)
|
||||
else:
|
||||
# Load light theme stylesheet
|
||||
theme_file = os.path.join(
|
||||
os.path.dirname(os.path.dirname(__file__)),
|
||||
"resources", "themes", "light.qss"
|
||||
)
|
||||
|
||||
# Apply theme if file exists
|
||||
if os.path.exists(theme_file):
|
||||
with open(theme_file, "r") as f:
|
||||
self.setStyleSheet(f.read())
|
||||
else:
|
||||
# Fallback to built-in style
|
||||
self.setStyleSheet("")
|
||||
|
||||
@Slot()
|
||||
def export_data(self):
|
||||
"""Export data to file"""
|
||||
export_format = self.export_tab.export_format_combo.currentText()
|
||||
export_data = self.export_tab.export_data_combo.currentText()
|
||||
|
||||
# Get file type filter based on format
|
||||
if export_format == "CSV":
|
||||
file_filter = "CSV Files (*.csv)"
|
||||
default_ext = ".csv"
|
||||
elif export_format == "JSON":
|
||||
file_filter = "JSON Files (*.json)"
|
||||
default_ext = ".json"
|
||||
elif export_format == "Excel":
|
||||
file_filter = "Excel Files (*.xlsx)"
|
||||
default_ext = ".xlsx"
|
||||
elif export_format == "PDF Report":
|
||||
file_filter = "PDF Files (*.pdf)"
|
||||
default_ext = ".pdf"
|
||||
else:
|
||||
file_filter = "All Files (*)"
|
||||
default_ext = ".txt"
|
||||
|
||||
# Get save path
|
||||
file_path, _ = QFileDialog.getSaveFileName(
|
||||
self,
|
||||
"Export Data",
|
||||
f"traffic_data{default_ext}",
|
||||
file_filter
|
||||
)
|
||||
|
||||
if not file_path:
|
||||
return
|
||||
|
||||
try:
|
||||
# Get analytics data
|
||||
analytics = self.analytics_controller.get_analytics()
|
||||
|
||||
# Export based on format
|
||||
if export_format == "CSV":
|
||||
from ..utils.helpers import create_export_csv
|
||||
result = create_export_csv(analytics['detection_counts'], file_path)
|
||||
elif export_format == "JSON":
|
||||
from ..utils.helpers import create_export_json
|
||||
result = create_export_json(analytics, file_path)
|
||||
elif export_format == "Excel":
|
||||
# Requires openpyxl
|
||||
try:
|
||||
import pandas as pd
|
||||
df = pd.DataFrame({
|
||||
'Class': list(analytics['detection_counts'].keys()),
|
||||
'Count': list(analytics['detection_counts'].values())
|
||||
})
|
||||
df.to_excel(file_path, index=False)
|
||||
result = True
|
||||
except Exception as e:
|
||||
print(f"Excel export error: {e}")
|
||||
result = False
|
||||
else:
|
||||
# Not implemented
|
||||
QMessageBox.information(
|
||||
self,
|
||||
"Not Implemented",
|
||||
f"Export to {export_format} is not yet implemented."
|
||||
)
|
||||
return
|
||||
|
||||
if result:
|
||||
self.statusBar().showMessage(f"Data exported to {file_path}", 3000)
|
||||
else:
|
||||
self.statusBar().showMessage("Error exporting data", 3000)
|
||||
|
||||
except Exception as e:
|
||||
QMessageBox.critical(
|
||||
self,
|
||||
"Export Error",
|
||||
f"Error exporting data: {str(e)}"
|
||||
)
|
||||
|
||||
@Slot()
|
||||
def show_about_dialog(self):
|
||||
"""Show about dialog"""
|
||||
QMessageBox.about(
|
||||
self,
|
||||
"About Traffic Monitoring System",
|
||||
"<h3>Traffic Monitoring System</h3>"
|
||||
"<p>Based on OpenVINO™ and PySide6</p>"
|
||||
"<p>Version 1.0.0</p>"
|
||||
"<p>© 2025 GSOC Project</p>"
|
||||
)
|
||||
@Slot(bool)
|
||||
def toggle_video_processing(self, start):
|
||||
"""
|
||||
Start or stop video processing.
|
||||
|
||||
Args:
|
||||
start: True to start processing, False to stop
|
||||
"""
|
||||
if self.video_controller:
|
||||
if start:
|
||||
try:
|
||||
# Make sure the source is correctly set to what the LiveTab has
|
||||
current_source = self.live_tab.current_source
|
||||
print(f"DEBUG: MainWindow toggle_processing with source: {current_source} (type: {type(current_source)})")
|
||||
|
||||
# Validate source
|
||||
if current_source is None:
|
||||
self.statusBar().showMessage("Error: No valid source selected")
|
||||
return
|
||||
|
||||
# For file sources, verify file exists
|
||||
if isinstance(current_source, str) and not current_source.isdigit():
|
||||
if not os.path.exists(current_source):
|
||||
self.statusBar().showMessage(f"Error: File not found: {current_source}")
|
||||
return
|
||||
|
||||
# Ensure the source is set before starting
|
||||
print(f"🎥 Setting video controller source to: {current_source}")
|
||||
self.video_controller.set_source(current_source)
|
||||
|
||||
# Now start processing after a short delay to ensure source is set
|
||||
print("⏱️ Scheduling video processing start after 200ms delay...")
|
||||
QTimer.singleShot(200, lambda: self._start_video_processing())
|
||||
|
||||
source_desc = f"file: {os.path.basename(current_source)}" if isinstance(current_source, str) and os.path.exists(current_source) else f"camera: {current_source}"
|
||||
self.statusBar().showMessage(f"Video processing started with {source_desc}")
|
||||
except Exception as e:
|
||||
print(f"❌ Error starting video: {e}")
|
||||
traceback.print_exc()
|
||||
self.statusBar().showMessage(f"Error: {str(e)}")
|
||||
else:
|
||||
try:
|
||||
print("🛑 Stopping video processing...")
|
||||
self.video_controller.stop()
|
||||
print("✅ Video controller stopped")
|
||||
self.statusBar().showMessage("Video processing stopped")
|
||||
except Exception as e:
|
||||
print(f"❌ Error stopping video: {e}")
|
||||
traceback.print_exc()
|
||||
|
||||
def _start_video_processing(self):
|
||||
"""Actual video processing start with extra error handling"""
|
||||
try:
|
||||
print("🚀 Starting video controller...")
|
||||
self.video_controller.start()
|
||||
print("✅ Video controller started successfully")
|
||||
except Exception as e:
|
||||
print(f"❌ Error in video processing start: {e}")
|
||||
traceback.print_exc()
|
||||
self.statusBar().showMessage(f"Video processing error: {str(e)}")
|
||||
|
||||
def closeEvent(self, event):
|
||||
"""Handle window close event"""
|
||||
# Stop processing
|
||||
if self.video_controller and self.video_controller._running:
|
||||
self.video_controller.stop()
|
||||
|
||||
# Save settings
|
||||
self.saveSettings()
|
||||
|
||||
# Accept close event
|
||||
event.accept()
|
||||
|
||||
def restoreSettings(self):
|
||||
"""Restore application settings"""
|
||||
# Restore window geometry
|
||||
geometry = self.settings.value("geometry")
|
||||
if geometry:
|
||||
self.restoreGeometry(geometry)
|
||||
|
||||
# Restore window state
|
||||
state = self.settings.value("windowState")
|
||||
if state:
|
||||
self.restoreState(state)
|
||||
|
||||
def saveSettings(self):
|
||||
"""Save application settings"""
|
||||
# Save window geometry
|
||||
self.settings.setValue("geometry", self.saveGeometry())
|
||||
|
||||
# Save window state
|
||||
self.settings.setValue("windowState", self.saveState())
|
||||
|
||||
# Save current directory as snapshot directory
|
||||
self.settings.setValue("snapshot_dir", os.getcwd())
|
||||
@Slot(dict)
|
||||
def update_traffic_light_status(self, stats):
|
||||
"""Update status bar with traffic light information if detected"""
|
||||
traffic_light_info = stats.get('traffic_light_color', 'unknown')
|
||||
|
||||
# Handle both string and dictionary return formats
|
||||
if isinstance(traffic_light_info, dict):
|
||||
traffic_light_color = traffic_light_info.get('color', 'unknown')
|
||||
confidence = traffic_light_info.get('confidence', 0.0)
|
||||
confidence_str = f" (Confidence: {confidence:.2f})" if confidence > 0 else ""
|
||||
else:
|
||||
traffic_light_color = traffic_light_info
|
||||
confidence_str = ""
|
||||
|
||||
if traffic_light_color != 'unknown':
|
||||
current_message = self.statusBar().currentMessage()
|
||||
if not current_message or "Traffic Light" not in current_message:
|
||||
# Handle both dictionary and string formats
|
||||
if isinstance(traffic_light_color, dict):
|
||||
color_text = traffic_light_color.get("color", "unknown").upper()
|
||||
else:
|
||||
color_text = str(traffic_light_color).upper()
|
||||
self.statusBar().showMessage(f"Traffic Light: {color_text}{confidence_str}")
|
||||
@Slot(dict)
|
||||
def handle_violation_detected(self, violation):
|
||||
"""Handle a detected traffic violation"""
|
||||
try:
|
||||
# Flash red status message
|
||||
self.statusBar().showMessage(f"🚨 RED LIGHT VIOLATION DETECTED - Vehicle ID: {violation['track_id']}", 5000)
|
||||
|
||||
# Add to violations tab
|
||||
self.violations_tab.add_violation(violation)
|
||||
|
||||
# Update analytics
|
||||
if self.analytics_controller:
|
||||
self.analytics_controller.register_violation(violation)
|
||||
|
||||
print(f"🚨 Violation processed: {violation['id']} at {violation['timestamp']}")
|
||||
except Exception as e:
|
||||
print(f"❌ Error handling violation: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
41
qt_app_pyside1/finale/splash.py
Normal file
@@ -0,0 +1,41 @@
|
||||
from PySide6.QtWidgets import QApplication, QSplashScreen
|
||||
from PySide6.QtCore import Qt, QTimer
|
||||
from PySide6.QtGui import QPixmap
|
||||
import sys
|
||||
import os
|
||||
|
||||
def show_splash(existing_app=None):
|
||||
# Use existing app if provided, otherwise create a new one
|
||||
app = existing_app or QApplication(sys.argv)
|
||||
|
||||
# Get the directory of the executable or script
|
||||
if getattr(sys, 'frozen', False):
|
||||
# Running as compiled executable
|
||||
app_dir = os.path.dirname(sys.executable)
|
||||
else:
|
||||
# Running as script
|
||||
app_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
# Look for splash image
|
||||
splash_image = os.path.join(app_dir, 'resources', 'splash.png')
|
||||
if not os.path.exists(splash_image):
|
||||
splash_image = os.path.join(app_dir, 'splash.png')
|
||||
if not os.path.exists(splash_image):
|
||||
return None
|
||||
|
||||
# Create splash screen
|
||||
pixmap = QPixmap(splash_image)
|
||||
splash = QSplashScreen(pixmap, Qt.WindowStaysOnTopHint)
|
||||
splash.show()
|
||||
app.processEvents()
|
||||
|
||||
return splash, app
|
||||
|
||||
if __name__ == "__main__":
|
||||
# This is for testing the splash screen independently
|
||||
splash, app = show_splash()
|
||||
|
||||
# Close the splash after 3 seconds
|
||||
QTimer.singleShot(3000, splash.close)
|
||||
|
||||
sys.exit(app.exec())
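# Typical usage from an application entry point (sketch; MainWindow is illustrative):
#
#     app = QApplication(sys.argv)
#     result = show_splash(app)
#     window = MainWindow()
#     window.show()
#     if result:
#         splash, _ = result
#         splash.finish(window)  # QSplashScreen.finish() closes it once the window is shown
#     sys.exit(app.exec())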
|
||||
677
qt_app_pyside1/finale/styles.py
Normal file
@@ -0,0 +1,677 @@
|
||||
"""
|
||||
Modern Dark Theme and Styling System
|
||||
===================================
|
||||
|
||||
Complete styling system with Material Design 3.0 principles, dark theme,
|
||||
animations, and responsive design for the Traffic Monitoring Application.
|
||||
|
||||
Features:
|
||||
- Material Design 3.0 dark theme
|
||||
- Animated transitions and hover effects
|
||||
- Responsive typography and spacing
|
||||
- Custom widget styling
|
||||
- Accent color system
|
||||
- Professional gradients and shadows
|
||||
"""
|
||||
|
||||
from PySide6.QtCore import Qt, QPropertyAnimation, QEasingCurve, QRect, QTimer
|
||||
from PySide6.QtGui import QFont, QColor, QPalette, QLinearGradient, QBrush
|
||||
from PySide6.QtWidgets import QApplication, QWidget
|
||||
from typing import Dict, Optional
|
||||
import json
|
||||
|
||||
class Colors:
|
||||
"""Material Design 3.0 Color Palette - Dark Theme"""
|
||||
|
||||
# Primary colors
|
||||
PRIMARY_BACKGROUND = "#121212"
|
||||
SECONDARY_BACKGROUND = "#1E1E1E"
|
||||
SURFACE = "#2C2C2C"
|
||||
SURFACE_VARIANT = "#383838"
|
||||
|
||||
# Accent colors
|
||||
ACCENT_CYAN = "#00BCD4"
|
||||
ACCENT_GREEN = "#4CAF50"
|
||||
ACCENT_RED = "#FF5722"
|
||||
ACCENT_YELLOW = "#FFC107"
|
||||
ACCENT_BLUE = "#2196F3"
|
||||
ACCENT_PURPLE = "#9C27B0"
|
||||
|
||||
# Text colors
|
||||
TEXT_PRIMARY = "#FFFFFF"
|
||||
TEXT_SECONDARY = "#B0B0B0"
|
||||
TEXT_DISABLED = "#757575"
|
||||
|
||||
# State colors
|
||||
SUCCESS = "#4CAF50"
|
||||
WARNING = "#FF9800"
|
||||
ERROR = "#F44336"
|
||||
INFO = "#2196F3"
|
||||
|
||||
# Border and divider
|
||||
BORDER = "#424242"
|
||||
DIVIDER = "#2C2C2C"
|
||||
|
||||
# Interactive states
|
||||
HOVER = "#404040"
|
||||
PRESSED = "#505050"
|
||||
SELECTED = "#1976D2"
|
||||
FOCUS = "#03DAC6"
|
||||
|
||||
class Fonts:
|
||||
"""Typography system with hierarchy"""
|
||||
|
||||
@staticmethod
|
||||
def get_font(size: int = 10, weight: str = "normal", family: str = "Segoe UI") -> QFont:
|
||||
"""Get a font with specified parameters"""
|
||||
font = QFont(family, size)
|
||||
|
||||
weight_map = {
|
||||
"light": QFont.Weight.Light,
|
||||
"normal": QFont.Weight.Normal,
|
||||
"medium": QFont.Weight.Medium,
|
||||
"semibold": QFont.Weight.DemiBold,
|
||||
"bold": QFont.Weight.Bold
|
||||
}
|
||||
|
||||
font.setWeight(weight_map.get(weight, QFont.Weight.Normal))
|
||||
return font
|
||||
|
||||
@staticmethod
|
||||
def heading_1() -> QFont:
|
||||
return Fonts.get_font(24, "bold")
|
||||
|
||||
@staticmethod
|
||||
def heading_2() -> QFont:
|
||||
return Fonts.get_font(20, "semibold")
|
||||
|
||||
@staticmethod
|
||||
def heading_3() -> QFont:
|
||||
return Fonts.get_font(16, "semibold")
|
||||
|
||||
@staticmethod
|
||||
def body_large() -> QFont:
|
||||
return Fonts.get_font(14, "normal")
|
||||
|
||||
@staticmethod
|
||||
def body_medium() -> QFont:
|
||||
return Fonts.get_font(12, "normal")
|
||||
|
||||
@staticmethod
|
||||
def body_small() -> QFont:
|
||||
return Fonts.get_font(10, "normal")
|
||||
|
||||
@staticmethod
|
||||
def caption() -> QFont:
|
||||
return Fonts.get_font(9, "normal")
|
||||
|
||||
@staticmethod
|
||||
def button() -> QFont:
|
||||
return Fonts.get_font(12, "medium")
|
||||
|
||||
class Spacing:
|
||||
"""Consistent spacing system"""
|
||||
XS = 4
|
||||
SM = 8
|
||||
MD = 16
|
||||
LG = 24
|
||||
XL = 32
|
||||
XXL = 48
|
||||
|
||||
class BorderRadius:
|
||||
"""Border radius system"""
|
||||
SM = 4
|
||||
MD = 8
|
||||
LG = 12
|
||||
XL = 16
|
||||
PILL = 9999
|
||||
|
||||
class ThemeManager:
|
||||
"""Manages application theme and styling"""
|
||||
|
||||
def __init__(self, accent_color: str = Colors.ACCENT_CYAN):
|
||||
self.accent_color = accent_color
|
||||
self._setup_palette()
|
||||
|
||||
def _setup_palette(self):
|
||||
"""Setup Qt application palette"""
|
||||
palette = QPalette()
|
||||
|
||||
# Window colors
|
||||
palette.setColor(QPalette.Window, QColor(Colors.PRIMARY_BACKGROUND))
|
||||
palette.setColor(QPalette.WindowText, QColor(Colors.TEXT_PRIMARY))
|
||||
|
||||
# Base colors (input fields)
|
||||
palette.setColor(QPalette.Base, QColor(Colors.SURFACE))
|
||||
palette.setColor(QPalette.Text, QColor(Colors.TEXT_PRIMARY))
|
||||
|
||||
# Button colors
|
||||
palette.setColor(QPalette.Button, QColor(Colors.SURFACE))
|
||||
palette.setColor(QPalette.ButtonText, QColor(Colors.TEXT_PRIMARY))
|
||||
|
||||
# Highlight colors
|
||||
palette.setColor(QPalette.Highlight, QColor(self.accent_color))
|
||||
palette.setColor(QPalette.HighlightedText, QColor(Colors.TEXT_PRIMARY))
|
||||
|
||||
# Apply palette
|
||||
if QApplication.instance():
|
||||
QApplication.instance().setPalette(palette)
|
||||
|
||||
def set_accent_color(self, color: str):
|
||||
"""Change the accent color"""
|
||||
self.accent_color = color
|
||||
self._setup_palette()
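# Usage sketch (assumes a QApplication has already been created, e.g. in main.py):
#
#     app = QApplication(sys.argv)
#     theme = ThemeManager(Colors.ACCENT_GREEN)      # palette is applied on construction
#     theme.set_accent_color(Colors.ACCENT_PURPLE)   # re-applies the palette with a new accent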
|
||||
|
||||
class StyleSheets:
|
||||
"""Collection of Qt StyleSheets for various components"""
|
||||
|
||||
@staticmethod
|
||||
def main_window() -> str:
|
||||
return f"""
|
||||
QMainWindow {{
|
||||
background-color: {Colors.PRIMARY_BACKGROUND};
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
}}
|
||||
|
||||
QMainWindow::separator {{
|
||||
background-color: {Colors.BORDER};
|
||||
width: 1px;
|
||||
height: 1px;
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def tab_widget() -> str:
|
||||
return f"""
|
||||
QTabWidget::pane {{
|
||||
border: 1px solid {Colors.BORDER};
|
||||
background-color: {Colors.SECONDARY_BACKGROUND};
|
||||
border-radius: {BorderRadius.MD}px;
|
||||
}}
|
||||
|
||||
QTabBar::tab {{
|
||||
background-color: {Colors.SURFACE};
|
||||
color: {Colors.TEXT_SECONDARY};
|
||||
padding: {Spacing.SM}px {Spacing.MD}px;
|
||||
margin-right: 2px;
|
||||
border-top-left-radius: {BorderRadius.SM}px;
|
||||
border-top-right-radius: {BorderRadius.SM}px;
|
||||
font-weight: 500;
|
||||
min-width: 100px;
|
||||
}}
|
||||
|
||||
QTabBar::tab:selected {{
|
||||
background-color: {Colors.ACCENT_CYAN};
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
}}
|
||||
|
||||
QTabBar::tab:hover:!selected {{
|
||||
background-color: {Colors.HOVER};
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def button_primary() -> str:
|
||||
return f"""
|
||||
QPushButton {{
|
||||
background-color: {Colors.ACCENT_CYAN};
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
border: none;
|
||||
padding: {Spacing.SM}px {Spacing.MD}px;
|
||||
border-radius: {BorderRadius.SM}px;
|
||||
font-weight: 500;
|
||||
min-height: 32px;
|
||||
}}
|
||||
|
||||
QPushButton:hover {{
|
||||
background-color: #00ACC1;
|
||||
}}
|
||||
|
||||
QPushButton:pressed {{
|
||||
background-color: #0097A7;
|
||||
}}
|
||||
|
||||
QPushButton:disabled {{
|
||||
background-color: {Colors.SURFACE};
|
||||
color: {Colors.TEXT_DISABLED};
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def button_secondary() -> str:
|
||||
return f"""
|
||||
QPushButton {{
|
||||
background-color: transparent;
|
||||
color: {Colors.ACCENT_CYAN};
|
||||
border: 2px solid {Colors.ACCENT_CYAN};
|
||||
padding: {Spacing.SM}px {Spacing.MD}px;
|
||||
border-radius: {BorderRadius.SM}px;
|
||||
font-weight: 500;
|
||||
min-height: 32px;
|
||||
}}
|
||||
|
||||
QPushButton:hover {{
|
||||
background-color: rgba(0, 188, 212, 0.1);
|
||||
}}
|
||||
|
||||
QPushButton:pressed {{
|
||||
background-color: rgba(0, 188, 212, 0.2);
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def card() -> str:
|
||||
return f"""
|
||||
QWidget {{
|
||||
background-color: {Colors.SURFACE};
|
||||
border: 1px solid {Colors.BORDER};
|
||||
border-radius: {BorderRadius.MD}px;
|
||||
padding: {Spacing.MD}px;
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def input_field() -> str:
|
||||
return f"""
|
||||
QLineEdit, QTextEdit, QSpinBox, QDoubleSpinBox, QComboBox {{
|
||||
background-color: {Colors.SURFACE};
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
border: 2px solid {Colors.BORDER};
|
||||
border-radius: {BorderRadius.SM}px;
|
||||
padding: {Spacing.SM}px;
|
||||
font-size: 12px;
|
||||
}}
|
||||
|
||||
QLineEdit:focus, QTextEdit:focus, QSpinBox:focus,
|
||||
QDoubleSpinBox:focus, QComboBox:focus {{
|
||||
border-color: {Colors.ACCENT_CYAN};
|
||||
}}
|
||||
|
||||
QLineEdit:hover, QTextEdit:hover, QSpinBox:hover,
|
||||
QDoubleSpinBox:hover, QComboBox:hover {{
|
||||
border-color: {Colors.HOVER};
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def table() -> str:
|
||||
return f"""
|
||||
QTableWidget {{
|
||||
background-color: {Colors.SURFACE};
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
gridline-color: {Colors.BORDER};
|
||||
border: 1px solid {Colors.BORDER};
|
||||
border-radius: {BorderRadius.SM}px;
|
||||
}}
|
||||
|
||||
QTableWidget::item {{
|
||||
padding: {Spacing.SM}px;
|
||||
border-bottom: 1px solid {Colors.BORDER};
|
||||
}}
|
||||
|
||||
QTableWidget::item:selected {{
|
||||
background-color: {Colors.SELECTED};
|
||||
}}
|
||||
|
||||
QTableWidget::item:hover {{
|
||||
background-color: {Colors.HOVER};
|
||||
}}
|
||||
|
||||
QHeaderView::section {{
|
||||
background-color: {Colors.SURFACE_VARIANT};
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
padding: {Spacing.SM}px;
|
||||
border: none;
|
||||
font-weight: 600;
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def scroll_bar() -> str:
|
||||
return f"""
|
||||
QScrollBar:vertical {{
|
||||
background-color: {Colors.SURFACE};
|
||||
width: 12px;
|
||||
border-radius: 6px;
|
||||
}}
|
||||
|
||||
QScrollBar::handle:vertical {{
|
||||
background-color: {Colors.BORDER};
|
||||
border-radius: 6px;
|
||||
min-height: 20px;
|
||||
}}
|
||||
|
||||
QScrollBar::handle:vertical:hover {{
|
||||
background-color: {Colors.HOVER};
|
||||
}}
|
||||
|
||||
QScrollBar::add-line:vertical, QScrollBar::sub-line:vertical {{
|
||||
height: 0px;
|
||||
}}
|
||||
|
||||
QScrollBar:horizontal {{
|
||||
background-color: {Colors.SURFACE};
|
||||
height: 12px;
|
||||
border-radius: 6px;
|
||||
}}
|
||||
|
||||
QScrollBar::handle:horizontal {{
|
||||
background-color: {Colors.BORDER};
|
||||
border-radius: 6px;
|
||||
min-width: 20px;
|
||||
}}
|
||||
|
||||
QScrollBar::handle:horizontal:hover {{
|
||||
background-color: {Colors.HOVER};
|
||||
}}
|
||||
|
||||
QScrollBar::add-line:horizontal, QScrollBar::sub-line:horizontal {{
|
||||
width: 0px;
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def progress_bar() -> str:
|
||||
return f"""
|
||||
QProgressBar {{
|
||||
background-color: {Colors.SURFACE};
|
||||
border: none;
|
||||
border-radius: {BorderRadius.SM}px;
|
||||
text-align: center;
|
||||
height: 8px;
|
||||
}}
|
||||
|
||||
QProgressBar::chunk {{
|
||||
background-color: {Colors.ACCENT_CYAN};
|
||||
border-radius: {BorderRadius.SM}px;
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def status_bar() -> str:
|
||||
return f"""
|
||||
QStatusBar {{
|
||||
background-color: {Colors.SURFACE_VARIANT};
|
||||
color: {Colors.TEXT_SECONDARY};
|
||||
border-top: 1px solid {Colors.BORDER};
|
||||
padding: {Spacing.SM}px;
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def toolbar() -> str:
|
||||
return f"""
|
||||
QToolBar {{
|
||||
background-color: {Colors.SURFACE_VARIANT};
|
||||
border: none;
|
||||
spacing: {Spacing.SM}px;
|
||||
padding: {Spacing.SM}px;
|
||||
}}
|
||||
|
||||
QToolButton {{
|
||||
background-color: transparent;
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
border: none;
|
||||
border-radius: {BorderRadius.SM}px;
|
||||
padding: {Spacing.SM}px;
|
||||
min-width: 36px;
|
||||
min-height: 36px;
|
||||
}}
|
||||
|
||||
QToolButton:hover {{
|
||||
background-color: {Colors.HOVER};
|
||||
}}
|
||||
|
||||
QToolButton:pressed {{
|
||||
background-color: {Colors.PRESSED};
|
||||
}}
|
||||
|
||||
QToolButton:checked {{
|
||||
background-color: {Colors.ACCENT_CYAN};
|
||||
}}
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def dock_widget() -> str:
|
||||
return f"""
|
||||
QDockWidget {{
|
||||
background-color: {Colors.SECONDARY_BACKGROUND};
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
titlebar-close-icon: none;
|
||||
titlebar-normal-icon: none;
|
||||
}}
|
||||
|
||||
QDockWidget::title {{
|
||||
background-color: {Colors.SURFACE_VARIANT};
|
||||
padding: {Spacing.SM}px;
|
||||
font-weight: 600;
|
||||
}}
|
||||
"""
|
||||
|
||||
class AnimationManager:
|
||||
"""Manages UI animations and transitions"""
|
||||
|
||||
@staticmethod
|
||||
def create_fade_animation(widget: QWidget, duration: int = 300) -> QPropertyAnimation:
|
||||
"""Create a fade in/out animation"""
|
||||
animation = QPropertyAnimation(widget, b"windowOpacity")
|
||||
animation.setDuration(duration)
|
||||
animation.setEasingCurve(QEasingCurve.InOutQuad)
|
||||
return animation
|
||||
|
||||
@staticmethod
|
||||
def create_slide_animation(widget: QWidget, start_pos: QRect, end_pos: QRect, duration: int = 300) -> QPropertyAnimation:
|
||||
"""Create a slide animation"""
|
||||
animation = QPropertyAnimation(widget, b"geometry")
|
||||
animation.setDuration(duration)
|
||||
animation.setStartValue(start_pos)
|
||||
animation.setEndValue(end_pos)
|
||||
animation.setEasingCurve(QEasingCurve.OutCubic)
|
||||
return animation
|
||||
|
||||
@staticmethod
|
||||
def pulse_widget(widget: QWidget, duration: int = 1000):
|
||||
"""Create a pulsing effect on a widget"""
|
||||
animation = QPropertyAnimation(widget, b"windowOpacity")
|
||||
animation.setDuration(duration)
|
||||
animation.setStartValue(1.0)
|
||||
animation.setKeyValueAt(0.5, 0.5)
|
||||
animation.setEndValue(1.0)
|
||||
animation.setEasingCurve(QEasingCurve.InOutSine)
|
||||
animation.setLoopCount(-1) # Infinite loop
|
||||
animation.start()
|
||||
return animation
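# Usage sketch (illustrative widget names). windowOpacity-based animations only take
# effect on top-level widgets such as dialogs or the main window:
#
#     fade = AnimationManager.create_fade_animation(dialog, duration=400)
#     fade.setStartValue(0.0)
#     fade.setEndValue(1.0)
#     fade.start()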
|
||||
|
||||
def apply_theme(app: QApplication, theme_manager: Optional[ThemeManager] = None):
|
||||
"""Apply the complete theme to the application"""
|
||||
if not theme_manager:
|
||||
theme_manager = ThemeManager()
|
||||
|
||||
# Set application style
|
||||
app.setStyle("Fusion")
|
||||
|
||||
# Apply global stylesheet
|
||||
global_style = f"""
|
||||
* {{
|
||||
font-family: "Segoe UI", "Inter", "Roboto", sans-serif;
|
||||
}}
|
||||
|
||||
{StyleSheets.main_window()}
|
||||
{StyleSheets.tab_widget()}
|
||||
{StyleSheets.input_field()}
|
||||
{StyleSheets.table()}
|
||||
{StyleSheets.scroll_bar()}
|
||||
{StyleSheets.progress_bar()}
|
||||
{StyleSheets.status_bar()}
|
||||
{StyleSheets.toolbar()}
|
||||
{StyleSheets.dock_widget()}
|
||||
|
||||
QWidget {{
|
||||
background-color: {Colors.PRIMARY_BACKGROUND};
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
}}
|
||||
|
||||
QGroupBox {{
|
||||
background-color: {Colors.SURFACE};
|
||||
border: 1px solid {Colors.BORDER};
|
||||
border-radius: {BorderRadius.MD}px;
|
||||
margin-top: {Spacing.MD}px;
|
||||
padding-top: {Spacing.SM}px;
|
||||
font-weight: 600;
|
||||
}}
|
||||
|
||||
QGroupBox::title {{
|
||||
subcontrol-origin: margin;
|
||||
left: {Spacing.MD}px;
|
||||
padding: 0 {Spacing.SM}px 0 {Spacing.SM}px;
|
||||
}}
|
||||
|
||||
QCheckBox, QRadioButton {{
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
spacing: {Spacing.SM}px;
|
||||
}}
|
||||
|
||||
QCheckBox::indicator, QRadioButton::indicator {{
|
||||
width: 18px;
|
||||
height: 18px;
|
||||
border: 2px solid {Colors.BORDER};
|
||||
border-radius: 4px;
|
||||
background-color: {Colors.SURFACE};
|
||||
}}
|
||||
|
||||
QCheckBox::indicator:checked, QRadioButton::indicator:checked {{
|
||||
background-color: {Colors.ACCENT_CYAN};
|
||||
border-color: {Colors.ACCENT_CYAN};
|
||||
}}
|
||||
|
||||
QSlider::groove:horizontal {{
|
||||
height: 6px;
|
||||
background-color: {Colors.SURFACE};
|
||||
border-radius: 3px;
|
||||
}}
|
||||
|
||||
QSlider::handle:horizontal {{
|
||||
background-color: {Colors.ACCENT_CYAN};
|
||||
border: none;
|
||||
width: 18px;
|
||||
height: 18px;
|
||||
border-radius: 9px;
|
||||
margin: -6px 0;
|
||||
}}
|
||||
|
||||
QSlider::sub-page:horizontal {{
|
||||
background-color: {Colors.ACCENT_CYAN};
|
||||
border-radius: 3px;
|
||||
}}
|
||||
|
||||
QMenu {{
|
||||
background-color: {Colors.SURFACE};
|
||||
color: {Colors.TEXT_PRIMARY};
|
||||
border: 1px solid {Colors.BORDER};
|
||||
border-radius: {BorderRadius.SM}px;
|
||||
padding: {Spacing.SM}px;
|
||||
}}
|
||||
|
||||
QMenu::item {{
|
||||
padding: {Spacing.SM}px {Spacing.MD}px;
|
||||
border-radius: {BorderRadius.SM}px;
|
||||
}}
|
||||
|
||||
QMenu::item:selected {{
|
||||
background-color: {Colors.HOVER};
|
||||
}}
|
||||
|
||||
QMenu::separator {{
|
||||
height: 1px;
|
||||
background-color: {Colors.BORDER};
|
||||
margin: {Spacing.SM}px;
|
||||
}}
|
||||
|
||||
QSplitter::handle {{
|
||||
background-color: {Colors.BORDER};
|
||||
}}
|
||||
|
||||
QSplitter::handle:horizontal {{
|
||||
width: 2px;
|
||||
}}
|
||||
|
||||
QSplitter::handle:vertical {{
|
||||
height: 2px;
|
||||
}}
|
||||
"""
|
||||
|
||||
app.setStyleSheet(global_style)
|
||||
|
||||
# Utility functions for common styling patterns
|
||||
def create_stat_card_style(accent_color: str = Colors.ACCENT_CYAN) -> str:
|
||||
"""Create a styled card for statistics display"""
|
||||
return f"""
|
||||
QWidget {{
|
||||
background-color: {Colors.SURFACE};
|
||||
border: 1px solid {Colors.BORDER};
|
||||
border-left: 4px solid {accent_color};
|
||||
border-radius: {BorderRadius.MD}px;
|
||||
padding: {Spacing.MD}px;
|
||||
}}
|
||||
|
||||
QLabel {{
|
||||
background-color: transparent;
|
||||
border: none;
|
||||
}}
|
||||
"""
|
||||
|
||||
def create_alert_style(alert_type: str = "info") -> str:
|
||||
"""Create styled alert components"""
|
||||
color_map = {
|
||||
"success": Colors.SUCCESS,
|
||||
"warning": Colors.WARNING,
|
||||
"error": Colors.ERROR,
|
||||
"info": Colors.INFO
|
||||
}
|
||||
|
||||
color = color_map.get(alert_type, Colors.INFO)
|
||||
|
||||
return f"""
|
||||
QWidget {{
|
||||
background-color: rgba({int(color[1:3], 16)}, {int(color[3:5], 16)}, {int(color[5:7], 16)}, 0.1);
|
||||
border: 1px solid {color};
|
||||
border-radius: {BorderRadius.SM}px;
|
||||
padding: {Spacing.MD}px;
|
||||
}}
|
||||
|
||||
QLabel {{
|
||||
color: {color};
|
||||
background-color: transparent;
|
||||
border: none;
|
||||
font-weight: 500;
|
||||
}}
|
||||
"""
|
||||
|
||||
class MaterialColors:
|
||||
"""Alias for Colors for compatibility with old code."""
|
||||
primary = Colors.ACCENT_CYAN
|
||||
primary_variant = Colors.ACCENT_BLUE
|
||||
secondary = Colors.ACCENT_GREEN
|
||||
surface = Colors.SURFACE
|
||||
text_primary = Colors.TEXT_PRIMARY
|
||||
text_on_primary = Colors.TEXT_PRIMARY
|
||||
|
||||
class FinaleStyles:
|
||||
"""Basic style helpers for compatibility with old code."""
|
||||
@staticmethod
|
||||
def get_group_box_style():
|
||||
return """
|
||||
QGroupBox {
|
||||
border: 1px solid #424242;
|
||||
border-radius: 8px;
|
||||
margin-top: 8px;
|
||||
background-color: #232323;
|
||||
}
|
||||
QGroupBox:title {
|
||||
subcontrol-origin: margin;
|
||||
left: 10px;
|
||||
padding: 0 3px 0 3px;
|
||||
color: #B0B0B0;
|
||||
}
|
||||
"""
|
||||
476
qt_app_pyside1/finale/views/analytics_view.py
Normal file
@@ -0,0 +1,476 @@
|
||||
"""
|
||||
Analytics View - Traffic analytics and reporting
|
||||
Displays charts, statistics, and historical data.
|
||||
"""
|
||||
|
||||
from PySide6.QtWidgets import (
|
||||
QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton,
|
||||
QGroupBox, QGridLayout, QFrame, QScrollArea, QTabWidget,
|
||||
QTableWidget, QTableWidgetItem, QHeaderView, QDateEdit,
|
||||
QComboBox, QSpinBox
|
||||
)
|
||||
from PySide6.QtCore import Qt, Signal, Slot, QTimer, QDate
|
||||
from PySide6.QtGui import QPixmap, QPainter, QBrush, QColor, QFont
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
import json
|
||||
|
||||
# Import finale components
|
||||
try:
|
||||
# Try relative imports first (when running as a package)
|
||||
from ..styles import FinaleStyles, MaterialColors
|
||||
from ..icons import FinaleIcons
|
||||
# Import advanced chart components from original analytics_tab
|
||||
import sys
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
# Add parent directory to path to import from qt_app_pyside
|
||||
sys.path.append(str(Path(__file__).parent.parent.parent))
|
||||
from qt_app_pyside.ui.analytics_tab import ChartWidget, TimeSeriesChart, DetectionPieChart, ViolationBarChart
|
||||
from qt_app_pyside.controllers.analytics_controller import AnalyticsController
|
||||
from qt_app_pyside.utils.helpers import load_configuration, format_timestamp, format_duration
|
||||
except ImportError:
|
||||
# Fallback for direct execution
|
||||
try:
|
||||
from styles import FinaleStyles, MaterialColors
|
||||
from icons import FinaleIcons
|
||||
# Create simplified chart widgets if advanced ones not available
|
||||
except ImportError:
|
||||
print("Error importing analytics components")
|
||||
class ChartWidget(QWidget):
|
||||
def __init__(self, title="Chart"):
|
||||
super().__init__()
|
||||
self.title = title
|
||||
self.data = []
|
||||
self.chart_type = "line" # line, bar, pie
|
||||
self.setMinimumSize(400, 300)
|
||||
|
||||
def set_data(self, data, chart_type="line"):
|
||||
"""Set chart data and type"""
|
||||
self.data = data
|
||||
self.chart_type = chart_type
|
||||
self.update()
|
||||
|
||||
def paintEvent(self, event):
|
||||
"""Paint the chart"""
|
||||
painter = QPainter(self)
|
||||
painter.setRenderHint(QPainter.Antialiasing)
|
||||
|
||||
# Background
|
||||
painter.fillRect(self.rect(), QColor(MaterialColors.surface))
|
||||
|
||||
# Border
|
||||
painter.setPen(QColor(MaterialColors.outline))
|
||||
painter.drawRect(self.rect().adjusted(0, 0, -1, -1))
|
||||
|
||||
# Title
|
||||
painter.setPen(QColor(MaterialColors.text_primary))
|
||||
painter.setFont(QFont("Segoe UI", 12, QFont.Bold))
|
||||
title_rect = self.rect().adjusted(10, 10, -10, -10)
|
||||
painter.drawText(title_rect, Qt.AlignTop | Qt.AlignLeft, self.title)
|
||||
|
||||
# Chart area
|
||||
chart_rect = self.rect().adjusted(50, 50, -20, -50)
|
||||
|
||||
if not self.data:
|
||||
# No data message
|
||||
painter.setPen(QColor(MaterialColors.text_secondary))
|
||||
painter.setFont(QFont("Segoe UI", 10))
|
||||
painter.drawText(chart_rect, Qt.AlignCenter, "No data available")
|
||||
return
|
||||
|
||||
# Draw chart based on type
|
||||
if self.chart_type == "line":
|
||||
self.draw_line_chart(painter, chart_rect)
|
||||
elif self.chart_type == "bar":
|
||||
self.draw_bar_chart(painter, chart_rect)
|
||||
elif self.chart_type == "pie":
|
||||
self.draw_pie_chart(painter, chart_rect)
|
||||
|
||||
def draw_line_chart(self, painter, rect):
|
||||
"""Draw a line chart"""
|
||||
if len(self.data) < 2:
|
||||
return
|
||||
|
||||
# Find min/max values
|
||||
values = [item.get('value', 0) for item in self.data]
|
||||
min_val, max_val = min(values), max(values)
|
||||
|
||||
if max_val == min_val:
|
||||
max_val = min_val + 1
|
||||
|
||||
# Calculate points
|
||||
points = []
|
||||
for i, item in enumerate(self.data):
|
||||
x = rect.left() + (i / (len(self.data) - 1)) * rect.width()
|
||||
y = rect.bottom() - ((item.get('value', 0) - min_val) / (max_val - min_val)) * rect.height()
|
||||
points.append((x, y))
|
||||
|
||||
# Draw grid lines
|
||||
painter.setPen(QColor(MaterialColors.outline_variant))
|
||||
for i in range(5):
|
||||
y = rect.top() + (i / 4) * rect.height()
|
||||
painter.drawLine(rect.left(), y, rect.right(), y)
|
||||
|
||||
# Draw line
|
||||
painter.setPen(QColor(MaterialColors.primary))
|
||||
for i in range(len(points) - 1):
|
||||
painter.drawLine(points[i][0], points[i][1], points[i+1][0], points[i+1][1])
|
||||
|
||||
# Draw points
|
||||
painter.setBrush(QBrush(QColor(MaterialColors.primary)))
|
||||
for x, y in points:
|
||||
painter.drawEllipse(x-3, y-3, 6, 6)
|
||||
|
||||
def draw_bar_chart(self, painter, rect):
|
||||
"""Draw a bar chart"""
|
||||
if not self.data:
|
||||
return
|
||||
|
||||
values = [item.get('value', 0) for item in self.data]
|
||||
max_val = max(values) if values else 1
|
||||
|
||||
bar_width = rect.width() / len(self.data) * 0.8
|
||||
spacing = rect.width() / len(self.data) * 0.2
|
||||
|
||||
painter.setBrush(QBrush(QColor(MaterialColors.primary)))
|
||||
|
||||
for i, item in enumerate(self.data):
|
||||
value = item.get('value', 0)
|
||||
height = (value / max_val) * rect.height()
|
||||
|
||||
x = rect.left() + i * (bar_width + spacing) + spacing / 2
|
||||
y = rect.bottom() - height
|
||||
|
||||
painter.drawRect(x, y, bar_width, height)
|
||||
|
||||
def draw_pie_chart(self, painter, rect):
|
||||
"""Draw a pie chart"""
|
||||
if not self.data:
|
||||
return
|
||||
|
||||
total = sum(item.get('value', 0) for item in self.data)
|
||||
if total == 0:
|
||||
return
|
||||
|
||||
# Calculate center and radius
|
||||
center = rect.center()
|
||||
radius = min(rect.width(), rect.height()) // 2 - 20
|
||||
|
||||
# Colors for pie slices
|
||||
colors = [MaterialColors.primary, MaterialColors.secondary, MaterialColors.tertiary,
|
||||
MaterialColors.error, MaterialColors.success, MaterialColors.warning]
|
||||
|
||||
start_angle = 0
|
||||
for i, item in enumerate(self.data):
|
||||
value = item.get('value', 0)
|
||||
angle = (value / total) * 360 * 16 # Qt uses 16ths of a degree
|
||||
|
||||
color = QColor(colors[i % len(colors)])
|
||||
painter.setBrush(QBrush(color))
|
||||
painter.setPen(QColor(MaterialColors.outline))
|
||||
|
||||
painter.drawPie(center.x() - radius, center.y() - radius,
|
||||
radius * 2, radius * 2, start_angle, angle)
|
||||
|
||||
start_angle += angle
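# Usage sketch for this fallback ChartWidget (illustrative values; only the 'value'
# key is read by the drawing code above):
#
#     chart = ChartWidget("Detections by class")
#     chart.set_data([{"value": 12}, {"value": 3}, {"value": 7}], chart_type="pie")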
|
||||
|
||||
class TrafficSummaryWidget(QGroupBox):
|
||||
"""
|
||||
Widget showing traffic summary statistics.
|
||||
"""
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__("Traffic Summary", parent)
|
||||
self.setup_ui()
|
||||
self.reset_stats()
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup summary UI"""
|
||||
layout = QGridLayout(self)
|
||||
|
||||
# Create stat labels
|
||||
self.total_vehicles_label = QLabel("0")
|
||||
self.total_violations_label = QLabel("0")
|
||||
self.avg_speed_label = QLabel("0.0 km/h")
|
||||
self.peak_hour_label = QLabel("N/A")
|
||||
|
||||
# Style the stat values
|
||||
for label in [self.total_vehicles_label, self.total_violations_label,
|
||||
self.avg_speed_label, self.peak_hour_label]:
|
||||
label.setFont(QFont("Segoe UI", 16, QFont.Bold))
|
||||
label.setStyleSheet(f"color: {MaterialColors.primary};")
|
||||
|
||||
# Add to layout
|
||||
layout.addWidget(QLabel("Total Vehicles:"), 0, 0)
|
||||
layout.addWidget(self.total_vehicles_label, 0, 1)
|
||||
|
||||
layout.addWidget(QLabel("Total Violations:"), 1, 0)
|
||||
layout.addWidget(self.total_violations_label, 1, 1)
|
||||
|
||||
layout.addWidget(QLabel("Average Speed:"), 2, 0)
|
||||
layout.addWidget(self.avg_speed_label, 2, 1)
|
||||
|
||||
layout.addWidget(QLabel("Peak Hour:"), 3, 0)
|
||||
layout.addWidget(self.peak_hour_label, 3, 1)
|
||||
|
||||
# Apply styling
|
||||
self.setStyleSheet(FinaleStyles.get_group_box_style())
|
||||
|
||||
def reset_stats(self):
|
||||
"""Reset all statistics"""
|
||||
self.total_vehicles_label.setText("0")
|
||||
self.total_violations_label.setText("0")
|
||||
self.avg_speed_label.setText("0.0 km/h")
|
||||
self.peak_hour_label.setText("N/A")
|
||||
|
||||
def update_stats(self, stats):
|
||||
"""Update statistics display"""
|
||||
if 'total_vehicles' in stats:
|
||||
self.total_vehicles_label.setText(str(stats['total_vehicles']))
|
||||
|
||||
if 'total_violations' in stats:
|
||||
self.total_violations_label.setText(str(stats['total_violations']))
|
||||
|
||||
if 'avg_speed' in stats:
|
||||
self.avg_speed_label.setText(f"{stats['avg_speed']:.1f} km/h")
|
||||
|
||||
if 'peak_hour' in stats:
|
||||
self.peak_hour_label.setText(stats['peak_hour'])
|
||||
|
||||
class ViolationsTableWidget(QTableWidget):
|
||||
"""
|
||||
Table widget for displaying violation records.
|
||||
"""
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__(parent)
|
||||
self.setup_table()
|
||||
|
||||
def setup_table(self):
|
||||
"""Setup the violations table"""
|
||||
# Set columns
|
||||
columns = ["Time", "Type", "Vehicle", "Location", "Confidence", "Actions"]
|
||||
self.setColumnCount(len(columns))
|
||||
self.setHorizontalHeaderLabels(columns)
|
||||
|
||||
# Configure table
|
||||
self.horizontalHeader().setStretchLastSection(True)
|
||||
self.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
|
||||
self.setSelectionBehavior(QTableWidget.SelectRows)
|
||||
self.setAlternatingRowColors(True)
|
||||
|
||||
# Apply styling
|
||||
self.setStyleSheet(FinaleStyles.get_table_style())
|
||||
|
||||
def add_violation(self, violation_data):
|
||||
"""Add a violation record to the table"""
|
||||
row = self.rowCount()
|
||||
self.insertRow(row)
|
||||
|
||||
# Populate row data
|
||||
time_str = violation_data.get('timestamp', datetime.now().strftime('%H:%M:%S'))
|
||||
violation_type = violation_data.get('type', 'Red Light')
|
||||
vehicle_id = violation_data.get('vehicle_id', 'Unknown')
|
||||
location = violation_data.get('location', 'Intersection 1')
|
||||
confidence = violation_data.get('confidence', 0.0)
|
||||
|
||||
self.setItem(row, 0, QTableWidgetItem(time_str))
|
||||
self.setItem(row, 1, QTableWidgetItem(violation_type))
|
||||
self.setItem(row, 2, QTableWidgetItem(vehicle_id))
|
||||
self.setItem(row, 3, QTableWidgetItem(location))
|
||||
self.setItem(row, 4, QTableWidgetItem(f"{confidence:.2f}"))
|
||||
|
||||
# Actions button
|
||||
actions_btn = QPushButton("View Details")
|
||||
actions_btn.clicked.connect(lambda: self.view_violation_details(violation_data))
|
||||
self.setCellWidget(row, 5, actions_btn)
|
||||
|
||||
# Auto-scroll to new violation
|
||||
self.scrollToBottom()
|
||||
|
||||
def view_violation_details(self, violation_data):
|
||||
"""View detailed violation information"""
|
||||
# This could open a detailed dialog
|
||||
print(f"Viewing violation details: {violation_data}")
|
||||
|
||||
class AnalyticsView(QWidget):
|
||||
"""
|
||||
Main analytics view with charts, statistics, and violation history.
|
||||
"""
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__(parent)
|
||||
self.analytics_controller = AnalyticsController()
|
||||
self.setup_ui()
|
||||
self.analytics_controller.data_updated.connect(self.refresh_analytics)
|
||||
# Load config if needed
|
||||
self.config = load_configuration('config.json')
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup the analytics view UI"""
|
||||
layout = QVBoxLayout(self)
|
||||
layout.setContentsMargins(16, 16, 16, 16)
|
||||
layout.setSpacing(16)
|
||||
|
||||
# Top controls
|
||||
controls_layout = QHBoxLayout()
|
||||
|
||||
# Date range selection
|
||||
controls_layout.addWidget(QLabel("Date Range:"))
|
||||
|
||||
self.start_date = QDateEdit()
|
||||
self.start_date.setDate(QDate.currentDate().addDays(-7))
|
||||
self.start_date.setCalendarPopup(True)
|
||||
controls_layout.addWidget(self.start_date)
|
||||
|
||||
controls_layout.addWidget(QLabel("to"))
|
||||
|
||||
self.end_date = QDateEdit()
|
||||
self.end_date.setDate(QDate.currentDate())
|
||||
self.end_date.setCalendarPopup(True)
|
||||
controls_layout.addWidget(self.end_date)
|
||||
|
||||
# Time interval
|
||||
controls_layout.addWidget(QLabel("Interval:"))
|
||||
self.interval_combo = QComboBox()
|
||||
self.interval_combo.addItems(["Hourly", "Daily", "Weekly"])
|
||||
controls_layout.addWidget(self.interval_combo)
|
||||
|
||||
# Refresh button
|
||||
self.refresh_btn = QPushButton(FinaleIcons.get_icon("refresh"), "Refresh")
|
||||
self.refresh_btn.clicked.connect(self.refresh_data)
|
||||
controls_layout.addWidget(self.refresh_btn)
|
||||
|
||||
controls_layout.addStretch()
|
||||
layout.addLayout(controls_layout)
|
||||
|
||||
# Main content area
|
||||
content_layout = QHBoxLayout()
|
||||
|
||||
# Left panel - Charts
|
||||
charts_widget = QWidget()
|
||||
charts_layout = QVBoxLayout(charts_widget)
|
||||
|
||||
# Traffic flow chart
|
||||
self.traffic_chart = AnalyticsChartWidget("Traffic Flow Over Time")
|
||||
charts_layout.addWidget(self.traffic_chart)
|
||||
|
||||
# Violation types chart
|
||||
self.violations_chart = AnalyticsChartWidget("Violation Types")
|
||||
charts_layout.addWidget(self.violations_chart)
|
||||
|
||||
content_layout.addWidget(charts_widget, 2)
|
||||
|
||||
# Right panel - Statistics and table
|
||||
right_panel = QVBoxLayout()
|
||||
|
||||
# Summary statistics
|
||||
self.summary_widget = TrafficSummaryWidget()
|
||||
right_panel.addWidget(self.summary_widget)
|
||||
|
||||
# Recent violations table
|
||||
violations_group = QGroupBox("Recent Violations")
|
||||
violations_layout = QVBoxLayout(violations_group)
|
||||
|
||||
self.violations_table = ViolationsTableWidget()
|
||||
violations_layout.addWidget(self.violations_table)
|
||||
|
||||
violations_group.setStyleSheet(FinaleStyles.get_group_box_style())
|
||||
right_panel.addWidget(violations_group, 1)
|
||||
|
||||
content_layout.addLayout(right_panel, 1)
|
||||
layout.addLayout(content_layout, 1)
|
||||
|
||||
# Apply theme
|
||||
self.apply_theme(True)
|
||||
|
||||
# Load initial data
|
||||
self.refresh_data()
|
||||
|
||||
@Slot()
|
||||
def refresh_data(self):
|
||||
"""Refresh analytics data"""
|
||||
print("Refreshing analytics data...")
|
||||
|
||||
# Update traffic flow chart (sample data)
|
||||
traffic_data = [
|
||||
{'label': '08:00', 'value': 45},
|
||||
{'label': '09:00', 'value': 67},
|
||||
{'label': '10:00', 'value': 89},
|
||||
{'label': '11:00', 'value': 76},
|
||||
{'label': '12:00', 'value': 92},
|
||||
{'label': '13:00', 'value': 84},
|
||||
{'label': '14:00', 'value': 71}
|
||||
]
|
||||
self.traffic_chart.set_data(traffic_data, "line")
|
||||
|
||||
# Update violations chart
|
||||
violations_data = [
|
||||
{'label': 'Red Light', 'value': 12},
|
||||
{'label': 'Speed', 'value': 8},
|
||||
{'label': 'Wrong Lane', 'value': 5},
|
||||
{'label': 'No Helmet', 'value': 3}
|
||||
]
|
||||
self.violations_chart.set_data(violations_data, "pie")
|
||||
|
||||
# Update summary
|
||||
summary_stats = {
|
||||
'total_vehicles': 1247,
|
||||
'total_violations': 28,
|
||||
'avg_speed': 35.2,
|
||||
'peak_hour': '12:00-13:00'
|
||||
}
|
||||
self.summary_widget.update_stats(summary_stats)
|
||||
|
||||
def refresh_analytics(self):
|
||||
"""Refresh analytics data from controller"""
|
||||
data = self.analytics_controller.get_analytics_data()
|
||||
# Use format_timestamp, format_duration for display
|
||||
# ... update charts and stats with new data ...
|
||||
|
||||
def update_demo_data(self):
|
||||
"""Update with demo data for demonstration"""
|
||||
import random
|
||||
|
||||
# Simulate new violation
|
||||
if random.random() < 0.3: # 30% chance
|
||||
violation = {
|
||||
'timestamp': datetime.now().strftime('%H:%M:%S'),
|
||||
'type': random.choice(['Red Light', 'Speed', 'Wrong Lane']),
|
||||
'vehicle_id': f"VH{random.randint(1000, 9999)}",
|
||||
'location': f"Intersection {random.randint(1, 5)}",
|
||||
'confidence': random.uniform(0.7, 0.95)
|
||||
}
|
||||
self.violations_table.add_violation(violation)
|
||||
|
||||
def add_violation(self, violation_data):
|
||||
"""Add a new violation (called from main window)"""
|
||||
self.violations_table.add_violation(violation_data)
|
||||
|
||||
def apply_theme(self, dark_mode=True):
|
||||
"""Apply theme to the view"""
|
||||
if dark_mode:
|
||||
self.setStyleSheet(f"""
|
||||
QWidget {{
|
||||
background-color: {MaterialColors.surface};
|
||||
color: {MaterialColors.text_primary};
|
||||
}}
|
||||
QPushButton {{
|
||||
background-color: {MaterialColors.primary};
|
||||
color: {MaterialColors.text_on_primary};
|
||||
border: none;
|
||||
border-radius: 6px;
|
||||
padding: 8px 16px;
|
||||
}}
|
||||
QPushButton:hover {{
|
||||
background-color: {MaterialColors.primary_variant};
|
||||
}}
|
||||
QDateEdit, QComboBox {{
|
||||
background-color: {MaterialColors.surface_variant};
|
||||
border: 1px solid {MaterialColors.outline};
|
||||
border-radius: 4px;
|
||||
padding: 6px;
|
||||
}}
|
||||
""")
|
||||
421
qt_app_pyside1/finale/views/live_view.py
Normal file
@@ -0,0 +1,421 @@
|
||||
"""
|
||||
Live View - Real-time detection and monitoring
|
||||
Connects to existing video controller and live detection logic.
|
||||
"""
|
||||
|
||||
from PySide6.QtWidgets import (
|
||||
QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton,
|
||||
QFileDialog, QComboBox, QSlider, QSpinBox, QGroupBox,
|
||||
QGridLayout, QFrame, QSizePolicy, QScrollArea
|
||||
)
|
||||
from PySide6.QtCore import Qt, Signal, Slot, QTimer, QSize
|
||||
from PySide6.QtGui import QPixmap, QPainter, QBrush, QColor, QFont
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
|
||||
# Import finale components
|
||||
from ..styles import FinaleStyles, MaterialColors
|
||||
from ..icons import FinaleIcons
|
||||
|
||||
class VideoDisplayWidget(QLabel):
|
||||
"""
|
||||
Advanced video display widget with overlays and interactions.
|
||||
"""
|
||||
|
||||
frame_clicked = Signal(int, int) # x, y coordinates
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__(parent)
|
||||
self.setMinimumSize(640, 480)
|
||||
self.setScaledContents(True)
|
||||
self.setAlignment(Qt.AlignCenter)
|
||||
self.setStyleSheet("""
|
||||
QLabel {
|
||||
border: 2px solid #424242;
|
||||
border-radius: 8px;
|
||||
background-color: #1a1a1a;
|
||||
}
|
||||
""")
|
||||
|
||||
# State
|
||||
self.current_pixmap = None
|
||||
self.overlay_enabled = True
|
||||
|
||||
# Default placeholder
|
||||
self.set_placeholder()
|
||||
|
||||
def set_placeholder(self):
|
||||
"""Set placeholder image when no video is loaded"""
|
||||
placeholder = QPixmap(640, 480)
|
||||
placeholder.fill(QColor(26, 26, 26))
|
||||
|
||||
painter = QPainter(placeholder)
|
||||
painter.setPen(QColor(117, 117, 117))
|
||||
painter.setFont(QFont("Segoe UI", 16))
|
||||
painter.drawText(placeholder.rect(), Qt.AlignCenter, "No Video Source\nClick to select a file")
|
||||
painter.end()
|
||||
|
||||
self.setPixmap(placeholder)
|
||||
|
||||
def update_frame(self, pixmap, detections=None):
|
||||
"""Update frame with detections overlay"""
|
||||
if pixmap is None:
|
||||
return
|
||||
|
||||
self.current_pixmap = pixmap
|
||||
|
||||
if self.overlay_enabled and detections:
|
||||
# Draw detection overlays
|
||||
pixmap = self.add_detection_overlay(pixmap, detections)
|
||||
|
||||
self.setPixmap(pixmap)
|
||||
|
||||
def add_detection_overlay(self, pixmap, detections):
|
||||
"""Add detection overlays to pixmap"""
|
||||
if not detections:
|
||||
return pixmap
|
||||
|
||||
# Create a copy to draw on
|
||||
overlay_pixmap = QPixmap(pixmap)
|
||||
painter = QPainter(overlay_pixmap)
|
||||
|
||||
# Draw detection boxes
|
||||
for detection in detections:
|
||||
# Extract detection info (format depends on backend)
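# Assumed dict shape (backend-dependent): {'bbox': [x1, y1, x2, y2], 'confidence': 0.87, 'class': 'car'}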
|
||||
if isinstance(detection, dict):
|
||||
bbox = detection.get('bbox', [])
|
||||
confidence = detection.get('confidence', 0.0)
|
||||
class_name = detection.get('class', 'unknown')
|
||||
else:
|
||||
# Handle other detection formats
|
||||
continue
|
||||
|
||||
if len(bbox) >= 4:
|
||||
x1, y1, x2, y2 = bbox[:4]
|
||||
|
||||
# Draw bounding box
|
||||
painter.setPen(QColor(MaterialColors.primary))
|
||||
painter.drawRect(int(x1), int(y1), int(x2-x1), int(y2-y1))
|
||||
|
||||
# Draw label
|
||||
label = f"{class_name}: {confidence:.2f}"
|
||||
painter.setPen(QColor(MaterialColors.text_primary))
|
||||
painter.drawText(int(x1), int(y1-5), label)
|
||||
|
||||
painter.end()
|
||||
return overlay_pixmap
|
||||
|
||||
def mousePressEvent(self, event):
|
||||
"""Handle mouse click events"""
|
||||
if event.button() == Qt.LeftButton:
|
||||
self.frame_clicked.emit(int(event.position().x()), int(event.position().y()))
|
||||
super().mousePressEvent(event)
|
||||
|
||||
class SourceControlWidget(QGroupBox):
|
||||
"""
|
||||
Widget for controlling video source (file, camera, stream).
|
||||
"""
|
||||
|
||||
source_changed = Signal(str) # source path/url
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__("Video Source", parent)
|
||||
self.setup_ui()
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup the source control UI"""
|
||||
layout = QVBoxLayout(self)
|
||||
|
||||
# Source type selection
|
||||
source_layout = QHBoxLayout()
|
||||
|
||||
self.source_combo = QComboBox()
|
||||
self.source_combo.addItems(["Select Source", "Video File", "Camera", "RTSP Stream"])
|
||||
self.source_combo.currentTextChanged.connect(self.on_source_type_changed)
|
||||
|
||||
self.browse_btn = QPushButton(FinaleIcons.get_icon("folder"), "Browse")
|
||||
self.browse_btn.clicked.connect(self.browse_file)
|
||||
self.browse_btn.setEnabled(False)
|
||||
|
||||
source_layout.addWidget(QLabel("Type:"))
|
||||
source_layout.addWidget(self.source_combo)
|
||||
source_layout.addWidget(self.browse_btn)
|
||||
|
||||
layout.addLayout(source_layout)
|
||||
|
||||
# Source path/URL input
|
||||
path_layout = QHBoxLayout()
|
||||
|
||||
self.path_label = QLabel("Path/URL:")
|
||||
self.path_display = QLabel("No source selected")
|
||||
self.path_display.setStyleSheet("QLabel { color: #757575; font-style: italic; }")
|
||||
|
||||
path_layout.addWidget(self.path_label)
|
||||
path_layout.addWidget(self.path_display, 1)
|
||||
|
||||
layout.addLayout(path_layout)
|
||||
|
||||
# Camera settings (initially hidden)
|
||||
self.camera_widget = QWidget()
|
||||
camera_layout = QHBoxLayout(self.camera_widget)
|
||||
|
||||
camera_layout.addWidget(QLabel("Camera ID:"))
|
||||
self.camera_spin = QSpinBox()
|
||||
self.camera_spin.setRange(0, 10)
|
||||
camera_layout.addWidget(self.camera_spin)
|
||||
|
||||
camera_layout.addStretch()
|
||||
self.camera_widget.hide()
|
||||
|
||||
layout.addWidget(self.camera_widget)
|
||||
|
||||
# Apply styling
|
||||
self.setStyleSheet(FinaleStyles.get_group_box_style())
|
||||
|
||||
@Slot(str)
|
||||
def on_source_type_changed(self, source_type):
|
||||
"""Handle source type change"""
|
||||
if source_type == "Video File":
|
||||
self.browse_btn.setEnabled(True)
|
||||
self.camera_widget.hide()
|
||||
elif source_type == "Camera":
|
||||
self.browse_btn.setEnabled(False)
|
||||
self.camera_widget.show()
|
||||
self.path_display.setText(f"Camera {self.camera_spin.value()}")
|
||||
self.source_changed.emit(str(self.camera_spin.value()))
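# Note: a later change to the Camera ID spin box is not re-emitted automatically; reselect "Camera" to apply it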
|
||||
elif source_type == "RTSP Stream":
|
||||
self.browse_btn.setEnabled(False)
|
||||
self.camera_widget.hide()
|
||||
# Could add RTSP URL input here
|
||||
else:
|
||||
self.browse_btn.setEnabled(False)
|
||||
self.camera_widget.hide()
|
||||
|
||||
@Slot()
|
||||
def browse_file(self):
|
||||
"""Browse for video file"""
|
||||
file_path, _ = QFileDialog.getOpenFileName(
|
||||
self, "Select Video File", "",
|
||||
"Video Files (*.mp4 *.avi *.mov *.mkv *.wmv);;All Files (*)"
|
||||
)
|
||||
|
||||
if file_path:
|
||||
self.path_display.setText(file_path)
|
||||
self.source_changed.emit(file_path)
|
||||
|
||||
class DetectionControlWidget(QGroupBox):
|
||||
"""
|
||||
Widget for controlling detection parameters.
|
||||
"""
|
||||
|
||||
confidence_changed = Signal(float)
|
||||
nms_threshold_changed = Signal(float)
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__("Detection Settings", parent)
|
||||
self.setup_ui()
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup detection control UI"""
|
||||
layout = QGridLayout(self)
|
||||
|
||||
# Confidence threshold
|
||||
layout.addWidget(QLabel("Confidence:"), 0, 0)
|
||||
|
||||
self.confidence_slider = QSlider(Qt.Horizontal)
|
||||
self.confidence_slider.setRange(1, 100)
|
||||
self.confidence_slider.setValue(30)
|
||||
self.confidence_slider.valueChanged.connect(self.on_confidence_changed)
|
||||
|
||||
self.confidence_label = QLabel("0.30")
|
||||
self.confidence_label.setMinimumWidth(40)
|
||||
|
||||
layout.addWidget(self.confidence_slider, 0, 1)
|
||||
layout.addWidget(self.confidence_label, 0, 2)
|
||||
|
||||
# NMS threshold
|
||||
layout.addWidget(QLabel("NMS Threshold:"), 1, 0)
|
||||
|
||||
self.nms_slider = QSlider(Qt.Horizontal)
|
||||
self.nms_slider.setRange(1, 100)
|
||||
self.nms_slider.setValue(45)
|
||||
self.nms_slider.valueChanged.connect(self.on_nms_changed)
|
||||
|
||||
self.nms_label = QLabel("0.45")
|
||||
self.nms_label.setMinimumWidth(40)
|
||||
|
||||
layout.addWidget(self.nms_slider, 1, 1)
|
||||
layout.addWidget(self.nms_label, 1, 2)
|
||||
|
||||
# Apply styling
|
||||
self.setStyleSheet(FinaleStyles.get_group_box_style())
|
||||
|
||||
@Slot(int)
|
||||
def on_confidence_changed(self, value):
|
||||
"""Handle confidence threshold change"""
|
||||
confidence = value / 100.0
|
||||
self.confidence_label.setText(f"{confidence:.2f}")
|
||||
self.confidence_changed.emit(confidence)
|
||||
|
||||
@Slot(int)
|
||||
def on_nms_changed(self, value):
|
||||
"""Handle NMS threshold change"""
|
||||
nms = value / 100.0
|
||||
self.nms_label.setText(f"{nms:.2f}")
|
||||
self.nms_threshold_changed.emit(nms)
|
||||
|
||||
class LiveView(QWidget):
|
||||
"""
|
||||
Main live detection view.
|
||||
Displays real-time video with detection overlays and controls.
|
||||
"""
|
||||
|
||||
source_changed = Signal(str)
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__(parent)
|
||||
self.setup_ui()
|
||||
self.current_detections = []
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup the live view UI"""
|
||||
layout = QHBoxLayout(self)
|
||||
layout.setContentsMargins(16, 16, 16, 16)
|
||||
layout.setSpacing(16)
|
||||
|
||||
# Main video display area
|
||||
video_layout = QVBoxLayout()
|
||||
|
||||
self.video_widget = VideoDisplayWidget()
|
||||
self.video_widget.frame_clicked.connect(self.on_frame_clicked)
|
||||
|
||||
video_layout.addWidget(self.video_widget, 1)
|
||||
|
||||
# Video controls
|
||||
controls_layout = QHBoxLayout()
|
||||
|
||||
self.play_btn = QPushButton(FinaleIcons.get_icon("play"), "")
|
||||
self.play_btn.setToolTip("Play/Pause")
|
||||
self.play_btn.setFixedSize(40, 40)
|
||||
|
||||
self.stop_btn = QPushButton(FinaleIcons.get_icon("stop"), "")
|
||||
self.stop_btn.setToolTip("Stop")
|
||||
self.stop_btn.setFixedSize(40, 40)
|
||||
|
||||
self.record_btn = QPushButton(FinaleIcons.get_icon("record"), "")
|
||||
self.record_btn.setToolTip("Record")
|
||||
self.record_btn.setFixedSize(40, 40)
|
||||
self.record_btn.setCheckable(True)
|
||||
|
||||
self.snapshot_btn = QPushButton(FinaleIcons.get_icon("camera"), "")
|
||||
self.snapshot_btn.setToolTip("Take Snapshot")
|
||||
self.snapshot_btn.setFixedSize(40, 40)
|
||||
|
||||
controls_layout.addWidget(self.play_btn)
|
||||
controls_layout.addWidget(self.stop_btn)
|
||||
controls_layout.addWidget(self.record_btn)
|
||||
controls_layout.addWidget(self.snapshot_btn)
|
||||
controls_layout.addStretch()
|
||||
|
||||
# Overlay toggle
|
||||
self.overlay_btn = QPushButton(FinaleIcons.get_icon("visibility"), "Overlays")
|
||||
self.overlay_btn.setCheckable(True)
|
||||
self.overlay_btn.setChecked(True)
|
||||
self.overlay_btn.toggled.connect(self.toggle_overlays)
|
||||
|
||||
controls_layout.addWidget(self.overlay_btn)
|
||||
|
||||
video_layout.addLayout(controls_layout)
|
||||
layout.addLayout(video_layout, 3)
|
||||
|
||||
# Right panel for controls
|
||||
right_panel = QVBoxLayout()
|
||||
|
||||
# Source control
|
||||
self.source_control = SourceControlWidget()
|
||||
self.source_control.source_changed.connect(self.source_changed.emit)
|
||||
right_panel.addWidget(self.source_control)
|
||||
|
||||
# Detection control
|
||||
self.detection_control = DetectionControlWidget()
|
||||
right_panel.addWidget(self.detection_control)
|
||||
|
||||
# Detection info
|
||||
self.info_widget = QGroupBox("Detection Info")
|
||||
info_layout = QVBoxLayout(self.info_widget)
|
||||
|
||||
self.detection_count_label = QLabel("Detections: 0")
|
||||
self.fps_label = QLabel("FPS: 0.0")
|
||||
self.resolution_label = QLabel("Resolution: N/A")
|
||||
|
||||
info_layout.addWidget(self.detection_count_label)
|
||||
info_layout.addWidget(self.fps_label)
|
||||
info_layout.addWidget(self.resolution_label)
|
||||
|
||||
self.info_widget.setStyleSheet(FinaleStyles.get_group_box_style())
|
||||
right_panel.addWidget(self.info_widget)
|
||||
|
||||
right_panel.addStretch()
|
||||
|
||||
layout.addLayout(right_panel, 1)
|
||||
|
||||
# Apply theme
|
||||
self.apply_theme(True)
|
||||
|
||||
def update_frame(self, pixmap, detections=None):
|
||||
"""Update the video frame with detections"""
|
||||
if pixmap is None:
|
||||
return
|
||||
|
||||
self.current_detections = detections or []
|
||||
self.video_widget.update_frame(pixmap, self.current_detections)
|
||||
|
||||
# Update detection info
|
||||
self.detection_count_label.setText(f"Detections: {len(self.current_detections)}")
|
||||
|
||||
if pixmap:
|
||||
size = pixmap.size()
|
||||
self.resolution_label.setText(f"Resolution: {size.width()}x{size.height()}")
|
||||
|
||||
def update_fps(self, fps):
|
||||
"""Update FPS display"""
|
||||
self.fps_label.setText(f"FPS: {fps:.1f}")
|
||||
|
||||
@Slot(bool)
|
||||
def toggle_overlays(self, enabled):
|
||||
"""Toggle detection overlays"""
|
||||
self.video_widget.overlay_enabled = enabled
|
||||
# Refresh current frame
|
||||
if self.video_widget.current_pixmap:
|
||||
self.video_widget.update_frame(self.video_widget.current_pixmap, self.current_detections)
|
||||
|
||||
@Slot(int, int)
|
||||
def on_frame_clicked(self, x, y):
|
||||
"""Handle frame click for interaction"""
|
||||
print(f"Frame clicked at ({x}, {y})")
|
||||
# Could be used for region selection, etc.
|
||||
|
||||
def apply_theme(self, dark_mode=True):
|
||||
"""Apply theme to the view"""
|
||||
if dark_mode:
|
||||
self.setStyleSheet(f"""
|
||||
QWidget {{
|
||||
background-color: {MaterialColors.surface};
|
||||
color: {MaterialColors.text_primary};
|
||||
}}
|
||||
QPushButton {{
|
||||
background-color: {MaterialColors.primary};
|
||||
color: {MaterialColors.text_on_primary};
|
||||
border: none;
|
||||
border-radius: 20px;
|
||||
padding: 8px;
|
||||
}}
|
||||
QPushButton:hover {{
|
||||
background-color: {MaterialColors.primary_variant};
|
||||
}}
|
||||
QPushButton:checked {{
|
||||
background-color: {MaterialColors.secondary};
|
||||
}}
|
||||
""")
|
||||
634
qt_app_pyside1/finale/views/settings_view.py
Normal file
@@ -0,0 +1,634 @@
|
||||
"""
|
||||
Settings View - Application configuration and preferences
|
||||
Manages all application settings, model configurations, and system preferences.
|
||||
"""
|
||||
|
||||
from PySide6.QtWidgets import (
|
||||
QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton,
|
||||
QGroupBox, QGridLayout, QFrame, QScrollArea, QTabWidget,
|
||||
QLineEdit, QSpinBox, QDoubleSpinBox, QComboBox, QCheckBox,
|
||||
QSlider, QTextEdit, QFileDialog, QMessageBox, QProgressBar,
|
||||
QFormLayout, QButtonGroup, QRadioButton
|
||||
)
|
||||
from PySide6.QtCore import Qt, Signal, Slot, QTimer, QSettings, QThread
|
||||
from PySide6.QtGui import QFont, QPixmap
|
||||
|
||||
import os
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Import finale components
|
||||
from ..styles import FinaleStyles, MaterialColors
|
||||
from ..icons import FinaleIcons
|
||||
from qt_app_pyside.ui.config_panel import ConfigPanel
|
||||
from qt_app_pyside.utils.helpers import load_configuration, save_configuration
|
||||
from qt_app_pyside.utils.helpers import format_timestamp, format_duration
|
||||
|
||||
class ModelConfigWidget(QGroupBox):
|
||||
"""
|
||||
Widget for configuring AI models and detection parameters.
|
||||
"""
|
||||
|
||||
config_changed = Signal(dict)
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__("AI Model Configuration", parent)
|
||||
self.setup_ui()
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup model configuration UI"""
|
||||
layout = QFormLayout(self)
|
||||
|
||||
# Vehicle detection model
|
||||
self.vehicle_model_edit = QLineEdit()
|
||||
self.vehicle_model_edit.setPlaceholderText("Path to vehicle detection model...")
|
||||
|
||||
vehicle_browse_btn = QPushButton(FinaleIcons.get_icon("folder"), "")
|
||||
vehicle_browse_btn.setFixedSize(32, 32)
|
||||
vehicle_browse_btn.clicked.connect(lambda: self.browse_model("vehicle"))
|
||||
|
||||
vehicle_layout = QHBoxLayout()
|
||||
vehicle_layout.addWidget(self.vehicle_model_edit)
|
||||
vehicle_layout.addWidget(vehicle_browse_btn)
|
||||
|
||||
layout.addRow("Vehicle Model:", vehicle_layout)
|
||||
|
||||
# Traffic light detection model
|
||||
self.traffic_model_edit = QLineEdit()
|
||||
self.traffic_model_edit.setPlaceholderText("Path to traffic light model...")
|
||||
|
||||
traffic_browse_btn = QPushButton(FinaleIcons.get_icon("folder"), "")
|
||||
traffic_browse_btn.setFixedSize(32, 32)
|
||||
traffic_browse_btn.clicked.connect(lambda: self.browse_model("traffic"))
|
||||
|
||||
traffic_layout = QHBoxLayout()
|
||||
traffic_layout.addWidget(self.traffic_model_edit)
|
||||
traffic_layout.addWidget(traffic_browse_btn)
|
||||
|
||||
layout.addRow("Traffic Light Model:", traffic_layout)
|
||||
|
||||
# Detection parameters
|
||||
self.confidence_spin = QDoubleSpinBox()
|
||||
self.confidence_spin.setRange(0.1, 1.0)
|
||||
self.confidence_spin.setSingleStep(0.05)
|
||||
self.confidence_spin.setValue(0.3)
|
||||
|
||||
layout.addRow("Confidence Threshold:", self.confidence_spin)
|
||||
|
||||
self.nms_spin = QDoubleSpinBox()
|
||||
self.nms_spin.setRange(0.1, 1.0)
|
||||
self.nms_spin.setSingleStep(0.05)
|
||||
self.nms_spin.setValue(0.45)
|
||||
layout.addRow("NMS Threshold:", self.nms_spin)
|
||||
|
||||
self.max_detections_spin = QSpinBox()
|
||||
self.max_detections_spin.setRange(10, 1000)
|
||||
self.max_detections_spin.setValue(100)
|
||||
layout.addRow("Max Detections:", self.max_detections_spin)
|
||||
|
||||
# Device selection
|
||||
self.device_combo = QComboBox()
|
||||
self.device_combo.addItems(["CPU", "GPU", "AUTO"])
|
||||
layout.addRow("Device:", self.device_combo)
|
||||
|
||||
# Model optimization
|
||||
self.optimize_check = QCheckBox("Enable Model Optimization")
|
||||
self.optimize_check.setChecked(True)
|
||||
layout.addRow(self.optimize_check)
|
||||
|
||||
# Apply styling
|
||||
self.setStyleSheet(FinaleStyles.get_group_box_style())
|
||||
|
||||
@Slot()
|
||||
def browse_model(self, model_type):
|
||||
"""Browse for model file"""
|
||||
file_path, _ = QFileDialog.getOpenFileName(
|
||||
self, f"Select {model_type.title()} Model", "",
|
||||
"Model Files (*.xml *.onnx *.pt *.bin);;All Files (*)"
|
||||
)
|
||||
|
||||
if file_path:
|
||||
if model_type == "vehicle":
|
||||
self.vehicle_model_edit.setText(file_path)
|
||||
elif model_type == "traffic":
|
||||
self.traffic_model_edit.setText(file_path)
|
||||
|
||||
def get_config(self):
|
||||
"""Get current model configuration"""
|
||||
return {
|
||||
'vehicle_model': self.vehicle_model_edit.text(),
|
||||
'traffic_model': self.traffic_model_edit.text(),
|
||||
'confidence_threshold': self.confidence_spin.value(),
|
||||
'nms_threshold': self.nms_spin.value(),
|
||||
'max_detections': self.max_detections_spin.value(),
|
||||
'device': self.device_combo.currentText(),
|
||||
'optimize_model': self.optimize_check.isChecked()
|
||||
}
|
||||
|
||||
def set_config(self, config):
|
||||
"""Set model configuration"""
|
||||
self.vehicle_model_edit.setText(config.get('vehicle_model', ''))
|
||||
self.traffic_model_edit.setText(config.get('traffic_model', ''))
|
||||
self.confidence_spin.setValue(config.get('confidence_threshold', 0.3))
|
||||
self.nms_spin.setValue(config.get('nms_threshold', 0.45))
|
||||
self.max_detections_spin.setValue(config.get('max_detections', 100))
|
||||
self.device_combo.setCurrentText(config.get('device', 'CPU'))
|
||||
self.optimize_check.setChecked(config.get('optimize_model', True))
|
||||
|
||||
class ViolationConfigWidget(QGroupBox):
|
||||
"""
|
||||
Widget for configuring violation detection parameters.
|
||||
"""
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__("Violation Detection", parent)
|
||||
self.setup_ui()
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup violation configuration UI"""
|
||||
layout = QFormLayout(self)
|
||||
|
||||
# Red light violation
|
||||
self.red_light_check = QCheckBox("Enable Red Light Detection")
|
||||
self.red_light_check.setChecked(True)
|
||||
layout.addRow(self.red_light_check)
|
||||
|
||||
self.red_light_sensitivity = QSlider(Qt.Horizontal)
|
||||
self.red_light_sensitivity.setRange(1, 10)
|
||||
self.red_light_sensitivity.setValue(5)
|
||||
layout.addRow("Red Light Sensitivity:", self.red_light_sensitivity)
|
||||
|
||||
# Speed violation
|
||||
self.speed_check = QCheckBox("Enable Speed Detection")
|
||||
self.speed_check.setChecked(True)
|
||||
layout.addRow(self.speed_check)
|
||||
|
||||
self.speed_limit_spin = QSpinBox()
|
||||
self.speed_limit_spin.setRange(10, 200)
|
||||
self.speed_limit_spin.setValue(50)
|
||||
self.speed_limit_spin.setSuffix(" km/h")
|
||||
layout.addRow("Speed Limit:", self.speed_limit_spin)
|
||||
|
||||
self.speed_tolerance_spin = QSpinBox()
|
||||
self.speed_tolerance_spin.setRange(0, 20)
|
||||
self.speed_tolerance_spin.setValue(5)
|
||||
self.speed_tolerance_spin.setSuffix(" km/h")
|
||||
layout.addRow("Speed Tolerance:", self.speed_tolerance_spin)
|
||||
|
||||
# Wrong lane detection
|
||||
self.wrong_lane_check = QCheckBox("Enable Wrong Lane Detection")
|
||||
self.wrong_lane_check.setChecked(True)
|
||||
layout.addRow(self.wrong_lane_check)
|
||||
|
||||
# Helmet detection
|
||||
self.helmet_check = QCheckBox("Enable Helmet Detection")
|
||||
self.helmet_check.setChecked(False)
|
||||
layout.addRow(self.helmet_check)
|
||||
|
||||
# Violation zone setup
|
||||
self.zone_setup_btn = QPushButton(FinaleIcons.get_icon("map"), "Setup Violation Zones")
|
||||
layout.addRow(self.zone_setup_btn)
|
||||
|
||||
# Apply styling
|
||||
self.setStyleSheet(FinaleStyles.get_group_box_style())
|
||||
|
||||
class UIPreferencesWidget(QGroupBox):
|
||||
"""
|
||||
Widget for UI preferences and appearance settings.
|
||||
"""
|
||||
|
||||
theme_changed = Signal(bool) # dark_mode
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__("User Interface", parent)
|
||||
self.setup_ui()
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup UI preferences"""
|
||||
layout = QFormLayout(self)
|
||||
|
||||
# Theme selection
|
||||
theme_group = QButtonGroup(self)
|
||||
self.dark_radio = QRadioButton("Dark Theme")
|
||||
self.light_radio = QRadioButton("Light Theme")
|
||||
self.auto_radio = QRadioButton("Auto (System)")
|
||||
|
||||
self.dark_radio.setChecked(True) # Default to dark
|
||||
|
||||
theme_group.addButton(self.dark_radio)
|
||||
theme_group.addButton(self.light_radio)
|
||||
theme_group.addButton(self.auto_radio)
|
||||
|
||||
theme_layout = QVBoxLayout()
|
||||
theme_layout.addWidget(self.dark_radio)
|
||||
theme_layout.addWidget(self.light_radio)
|
||||
theme_layout.addWidget(self.auto_radio)
|
||||
|
||||
layout.addRow("Theme:", theme_layout)
|
||||
|
||||
# Language selection
|
||||
self.language_combo = QComboBox()
|
||||
self.language_combo.addItems(["English", "Español", "Français", "Deutsch", "العربية"])
|
||||
layout.addRow("Language:", self.language_combo)
|
||||
|
||||
# Font size
|
||||
self.font_size_spin = QSpinBox()
|
||||
self.font_size_spin.setRange(8, 16)
|
||||
self.font_size_spin.setValue(9)
|
||||
layout.addRow("Font Size:", self.font_size_spin)
|
||||
|
||||
# Animations
|
||||
self.animations_check = QCheckBox("Enable Animations")
|
||||
self.animations_check.setChecked(True)
|
||||
layout.addRow(self.animations_check)
|
||||
|
||||
# Sound notifications
|
||||
self.sound_check = QCheckBox("Sound Notifications")
|
||||
self.sound_check.setChecked(True)
|
||||
layout.addRow(self.sound_check)
|
||||
|
||||
# Auto-save
|
||||
self.autosave_check = QCheckBox("Auto-save Configuration")
|
||||
self.autosave_check.setChecked(True)
|
||||
layout.addRow(self.autosave_check)
|
||||
|
||||
# Update interval
|
||||
self.update_interval_spin = QSpinBox()
|
||||
self.update_interval_spin.setRange(100, 5000)
|
||||
self.update_interval_spin.setValue(1000)
|
||||
self.update_interval_spin.setSuffix(" ms")
|
||||
layout.addRow("Update Interval:", self.update_interval_spin)
|
||||
|
||||
# Connect theme signals
|
||||
self.dark_radio.toggled.connect(lambda checked: self.theme_changed.emit(True) if checked else None)
|
||||
self.light_radio.toggled.connect(lambda checked: self.theme_changed.emit(False) if checked else None)
|
||||
|
||||
# Apply styling
|
||||
self.setStyleSheet(FinaleStyles.get_group_box_style())
|
||||
|
||||
class PerformanceWidget(QGroupBox):
|
||||
"""
|
||||
Widget for performance and system settings.
|
||||
"""
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__("Performance", parent)
|
||||
self.setup_ui()
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup performance settings"""
|
||||
layout = QFormLayout(self)
|
||||
|
||||
# Processing threads
|
||||
self.threads_spin = QSpinBox()
|
||||
self.threads_spin.setRange(1, 16)
|
||||
self.threads_spin.setValue(4)
|
||||
layout.addRow("Processing Threads:", self.threads_spin)
|
||||
|
||||
# Frame buffer size
|
||||
self.buffer_size_spin = QSpinBox()
|
||||
self.buffer_size_spin.setRange(1, 100)
|
||||
self.buffer_size_spin.setValue(10)
|
||||
layout.addRow("Frame Buffer Size:", self.buffer_size_spin)
|
||||
|
||||
# Memory limit
|
||||
self.memory_limit_spin = QSpinBox()
|
||||
self.memory_limit_spin.setRange(512, 8192)
|
||||
self.memory_limit_spin.setValue(2048)
|
||||
self.memory_limit_spin.setSuffix(" MB")
|
||||
layout.addRow("Memory Limit:", self.memory_limit_spin)
|
||||
|
||||
# GPU acceleration
|
||||
self.gpu_check = QCheckBox("Enable GPU Acceleration")
|
||||
self.gpu_check.setChecked(False)
|
||||
layout.addRow(self.gpu_check)
|
||||
|
||||
# Performance mode
|
||||
self.performance_combo = QComboBox()
|
||||
self.performance_combo.addItems(["Balanced", "Performance", "Power Save"])
|
||||
layout.addRow("Performance Mode:", self.performance_combo)
|
||||
|
||||
# Logging level
|
||||
self.logging_combo = QComboBox()
|
||||
self.logging_combo.addItems(["DEBUG", "INFO", "WARNING", "ERROR"])
|
||||
self.logging_combo.setCurrentText("INFO")
|
||||
layout.addRow("Logging Level:", self.logging_combo)
|
||||
|
||||
# Apply styling
|
||||
self.setStyleSheet(FinaleStyles.get_group_box_style())
|
||||
|
||||
class DataManagementWidget(QGroupBox):
|
||||
"""
|
||||
Widget for data storage and export settings.
|
||||
"""
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__("Data Management", parent)
|
||||
self.setup_ui()
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup data management settings"""
|
||||
layout = QFormLayout(self)
|
||||
|
||||
# Data directory
|
||||
self.data_dir_edit = QLineEdit()
|
||||
self.data_dir_edit.setPlaceholderText("Data storage directory...")
|
||||
|
||||
data_browse_btn = QPushButton(FinaleIcons.get_icon("folder"), "")
|
||||
data_browse_btn.setFixedSize(32, 32)
|
||||
data_browse_btn.clicked.connect(self.browse_data_directory)
|
||||
|
||||
data_layout = QHBoxLayout()
|
||||
data_layout.addWidget(self.data_dir_edit)
|
||||
data_layout.addWidget(data_browse_btn)
|
||||
|
||||
layout.addRow("Data Directory:", data_layout)
|
||||
|
||||
# Auto-export
|
||||
self.auto_export_check = QCheckBox("Auto-export Violations")
|
||||
layout.addRow(self.auto_export_check)
|
||||
|
||||
# Export format
|
||||
self.export_format_combo = QComboBox()
|
||||
self.export_format_combo.addItems(["JSON", "CSV", "XML", "PDF"])
|
||||
layout.addRow("Export Format:", self.export_format_combo)
|
||||
|
||||
# Data retention
|
||||
self.retention_spin = QSpinBox()
|
||||
self.retention_spin.setRange(1, 365)
|
||||
self.retention_spin.setValue(30)
|
||||
self.retention_spin.setSuffix(" days")
|
||||
layout.addRow("Data Retention:", self.retention_spin)
|
||||
|
||||
# Backup settings
|
||||
self.backup_check = QCheckBox("Enable Automatic Backup")
|
||||
layout.addRow(self.backup_check)
|
||||
|
||||
self.backup_interval_combo = QComboBox()
|
||||
self.backup_interval_combo.addItems(["Daily", "Weekly", "Monthly"])
|
||||
layout.addRow("Backup Interval:", self.backup_interval_combo)
|
||||
|
||||
# Database cleanup
|
||||
cleanup_btn = QPushButton(FinaleIcons.get_icon("delete"), "Cleanup Old Data")
|
||||
layout.addRow(cleanup_btn)
|
||||
|
||||
# Apply styling
|
||||
self.setStyleSheet(FinaleStyles.get_group_box_style())
|
||||
|
||||
@Slot()
|
||||
def browse_data_directory(self):
|
||||
"""Browse for data directory"""
|
||||
directory = QFileDialog.getExistingDirectory(
|
||||
self, "Select Data Directory", self.data_dir_edit.text()
|
||||
)
|
||||
if directory:
|
||||
self.data_dir_edit.setText(directory)
|
||||
|
||||
class SettingsView(QWidget):
|
||||
"""
|
||||
Main settings view with tabbed configuration sections.
|
||||
"""
|
||||
|
||||
settings_changed = Signal(dict)
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super().__init__(parent)
|
||||
self.config = load_configuration('config.json')
|
||||
# Add configuration panel from original
|
||||
self.config_panel = ConfigPanel()
|
||||
self.settings = QSettings("Finale", "TrafficMonitoring")
|
||||
self.setup_ui()
|
||||
self.load_settings()
|
||||
|
||||
def setup_ui(self):
|
||||
"""Setup the settings view UI"""
|
||||
layout = QVBoxLayout(self)
|
||||
layout.setContentsMargins(16, 16, 16, 16)
|
||||
layout.setSpacing(16)
|
||||
|
||||
# Header
|
||||
header_layout = QHBoxLayout()
|
||||
|
||||
title_label = QLabel("Settings")
|
||||
title_label.setFont(QFont("Segoe UI", 18, QFont.Bold))
|
||||
|
||||
# Action buttons
|
||||
self.reset_btn = QPushButton(FinaleIcons.get_icon("refresh"), "Reset to Defaults")
|
||||
self.reset_btn.clicked.connect(self.reset_to_defaults)
|
||||
|
||||
self.export_btn = QPushButton(FinaleIcons.get_icon("export"), "Export Settings")
|
||||
self.export_btn.clicked.connect(self.export_settings)
|
||||
|
||||
self.import_btn = QPushButton(FinaleIcons.get_icon("import"), "Import Settings")
|
||||
self.import_btn.clicked.connect(self.import_settings)
|
||||
|
||||
header_layout.addWidget(title_label)
|
||||
header_layout.addStretch()
|
||||
header_layout.addWidget(self.reset_btn)
|
||||
header_layout.addWidget(self.export_btn)
|
||||
header_layout.addWidget(self.import_btn)
|
||||
|
||||
layout.addLayout(header_layout)
|
||||
|
||||
# Settings tabs
|
||||
self.tabs = QTabWidget()
|
||||
|
||||
# Create configuration widgets
|
||||
self.model_config = ModelConfigWidget()
|
||||
self.violation_config = ViolationConfigWidget()
|
||||
self.ui_preferences = UIPreferencesWidget()
|
||||
self.performance_config = PerformanceWidget()
|
||||
self.data_management = DataManagementWidget()
|
||||
|
||||
# Add tabs
|
||||
self.tabs.addTab(self.model_config, FinaleIcons.get_icon("model"), "AI Models")
|
||||
self.tabs.addTab(self.violation_config, FinaleIcons.get_icon("warning"), "Violations")
|
||||
self.tabs.addTab(self.ui_preferences, FinaleIcons.get_icon("palette"), "Interface")
|
||||
self.tabs.addTab(self.performance_config, FinaleIcons.get_icon("speed"), "Performance")
|
||||
self.tabs.addTab(self.data_management, FinaleIcons.get_icon("database"), "Data")
|
||||
|
||||
# Style tabs
|
||||
self.tabs.setStyleSheet(FinaleStyles.get_tab_widget_style())
|
||||
|
||||
layout.addWidget(self.tabs, 1)
|
||||
|
||||
# Bottom action bar
|
||||
action_layout = QHBoxLayout()
|
||||
|
||||
self.apply_btn = QPushButton(FinaleIcons.get_icon("check"), "Apply")
|
||||
self.apply_btn.clicked.connect(self.apply_settings)
|
||||
|
||||
self.save_btn = QPushButton(FinaleIcons.get_icon("save"), "Save")
|
||||
self.save_btn.clicked.connect(self.save_settings)
|
||||
|
||||
self.cancel_btn = QPushButton(FinaleIcons.get_icon("close"), "Cancel")
|
||||
self.cancel_btn.clicked.connect(self.cancel_changes)
|
||||
|
||||
action_layout.addStretch()
|
||||
action_layout.addWidget(self.apply_btn)
|
||||
action_layout.addWidget(self.save_btn)
|
||||
action_layout.addWidget(self.cancel_btn)
|
||||
|
||||
layout.addLayout(action_layout)
|
||||
|
||||
# Connect signals
|
||||
self.ui_preferences.theme_changed.connect(self.on_theme_changed)
|
||||
|
||||
# Apply theme
|
||||
self.apply_theme(True)
|
||||
|
||||
def load_settings(self):
|
||||
"""Load settings from QSettings"""
|
||||
# Load model configuration
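# Pass the expected type to QSettings.value(); some storage backends otherwise return every value as a string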
|
||||
model_config = {
|
||||
'vehicle_model': self.settings.value('model/vehicle_model', ''),
|
||||
'traffic_model': self.settings.value('model/traffic_model', ''),
|
||||
'confidence_threshold': self.settings.value('model/confidence_threshold', 0.3, float),
|
||||
'nms_threshold': self.settings.value('model/nms_threshold', 0.45, float),
|
||||
'max_detections': self.settings.value('model/max_detections', 100, int),
|
||||
'device': self.settings.value('model/device', 'CPU'),
|
||||
'optimize_model': self.settings.value('model/optimize_model', True, bool)
|
||||
}
|
||||
self.model_config.set_config(model_config)
|
||||
|
||||
# Load UI preferences
|
||||
dark_mode = self.settings.value('ui/dark_mode', True, bool)
|
||||
if dark_mode:
|
||||
self.ui_preferences.dark_radio.setChecked(True)
|
||||
else:
|
||||
self.ui_preferences.light_radio.setChecked(True)
|
||||
|
||||
@Slot()
|
||||
def apply_settings(self):
|
||||
"""Apply current settings"""
|
||||
settings_data = self.get_all_settings()
|
||||
self.settings_changed.emit(settings_data)
|
||||
|
||||
@Slot()
|
||||
def save_settings(self):
|
||||
"""Save settings to QSettings"""
|
||||
# Save model configuration
|
||||
model_config = self.model_config.get_config()
|
||||
for key, value in model_config.items():
|
||||
self.settings.setValue(f'model/{key}', value)
|
||||
|
||||
# Save UI preferences
|
||||
self.settings.setValue('ui/dark_mode', self.ui_preferences.dark_radio.isChecked())
|
||||
|
||||
# Sync settings
|
||||
self.settings.sync()
|
||||
|
||||
QMessageBox.information(self, "Settings Saved", "Settings have been saved successfully.")
|
||||
save_configuration(self.get_all_settings(), 'config.json')
|
||||
|
||||
@Slot()
|
||||
def cancel_changes(self):
|
||||
"""Cancel changes and reload settings"""
|
||||
self.load_settings()
|
||||
|
||||
@Slot()
|
||||
def reset_to_defaults(self):
|
||||
"""Reset all settings to defaults"""
|
||||
reply = QMessageBox.question(
|
||||
self, "Reset Settings",
|
||||
"Are you sure you want to reset all settings to defaults?",
|
||||
QMessageBox.Yes | QMessageBox.No
|
||||
)
|
||||
|
||||
if reply == QMessageBox.Yes:
|
||||
self.settings.clear()
|
||||
self.load_settings()
|
||||
|
||||
@Slot()
|
||||
def export_settings(self):
|
||||
"""Export settings to file"""
|
||||
file_path, _ = QFileDialog.getSaveFileName(
|
||||
self, "Export Settings", "",
|
||||
"JSON Files (*.json);;All Files (*)"
|
||||
)
|
||||
|
||||
if file_path:
|
||||
settings_data = self.get_all_settings()
|
||||
try:
|
||||
with open(file_path, 'w') as f:
|
||||
json.dump(settings_data, f, indent=2)
|
||||
QMessageBox.information(self, "Export Successful", "Settings exported successfully.")
|
||||
except Exception as e:
|
||||
QMessageBox.critical(self, "Export Error", f"Failed to export settings:\n{str(e)}")
|
||||
|
||||
@Slot()
|
||||
def import_settings(self):
|
||||
"""Import settings from file"""
|
||||
file_path, _ = QFileDialog.getOpenFileName(
|
||||
self, "Import Settings", "",
|
||||
"JSON Files (*.json);;All Files (*)"
|
||||
)
|
||||
|
||||
if file_path:
|
||||
try:
|
||||
with open(file_path, 'r') as f:
|
||||
settings_data = json.load(f)
|
||||
|
||||
# Apply imported settings
|
||||
self.apply_imported_settings(settings_data)
|
||||
QMessageBox.information(self, "Import Successful", "Settings imported successfully.")
|
||||
|
||||
except Exception as e:
|
||||
QMessageBox.critical(self, "Import Error", f"Failed to import settings:\n{str(e)}")
|
||||
|
||||
def get_all_settings(self):
|
||||
"""Get all current settings as dictionary"""
|
||||
return {
|
||||
'model': self.model_config.get_config(),
|
||||
'ui': {
|
||||
'dark_mode': self.ui_preferences.dark_radio.isChecked(),
|
||||
'language': self.ui_preferences.language_combo.currentText(),
|
||||
'font_size': self.ui_preferences.font_size_spin.value(),
|
||||
'animations': self.ui_preferences.animations_check.isChecked(),
|
||||
'sound': self.ui_preferences.sound_check.isChecked()
|
||||
}
|
||||
}
|
||||
|
||||
def apply_imported_settings(self, settings_data):
|
||||
"""Apply imported settings data"""
|
||||
if 'model' in settings_data:
|
||||
self.model_config.set_config(settings_data['model'])
|
||||
|
||||
if 'ui' in settings_data:
|
||||
ui_settings = settings_data['ui']
|
||||
if 'dark_mode' in ui_settings:
|
||||
if ui_settings['dark_mode']:
|
||||
self.ui_preferences.dark_radio.setChecked(True)
|
||||
else:
|
||||
self.ui_preferences.light_radio.setChecked(True)
|
||||
|
||||
@Slot(bool)
|
||||
def on_theme_changed(self, dark_mode):
|
||||
"""Handle theme change"""
|
||||
self.apply_theme(dark_mode)
|
||||
|
||||
def apply_theme(self, dark_mode=True):
|
||||
"""Apply theme to the view"""
|
||||
if dark_mode:
|
||||
self.setStyleSheet(f"""
|
||||
QWidget {{
|
||||
background-color: {MaterialColors.surface};
|
||||
color: {MaterialColors.text_primary};
|
||||
}}
|
||||
QPushButton {{
|
||||
background-color: {MaterialColors.primary};
|
||||
color: {MaterialColors.text_on_primary};
|
||||
border: none;
|
||||
border-radius: 6px;
|
||||
padding: 8px 16px;
|
||||
}}
|
||||
QPushButton:hover {{
|
||||
background-color: {MaterialColors.primary_variant};
|
||||
}}
|
||||
""")
|
||||
|
||||
def display_timestamp(self, ts):
|
||||
return format_timestamp(ts)
|
||||
def display_duration(self, seconds):
|
||||
return format_duration(seconds)
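A minimal standalone sketch, assuming the finale package and the qt_app_pyside helpers it imports are on sys.path and a config.json is available:

import sys
from PySide6.QtWidgets import QApplication
from finale.views.settings_view import SettingsView

app = QApplication(sys.argv)
view = SettingsView()
view.settings_changed.connect(lambda cfg: print("applied:", cfg))  # fires on Apply
view.show()
sys.exit(app.exec())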
|
||||
Some files were not shown because too many files have changed in this diff.