diff --git a/.gitignore b/.gitignore
index bae928a..0c80c65 100755
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,23 @@
 *.npy
 *.loglogin
 *.cpython*
+*.h5
 visualization_learning
 collector_tests
-unused_files
\ No newline at end of file
+unused_files
+test/
+fix.md
+error*.txt
+.venv/
+venv/
+__pycache__/
+*.pyc
+output/
+*.png
+*.csv
+!dummy/*.h5
+dist/
+build/
+.vscode
+*.log
+.continue
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..2956e39
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,131 @@
+# What's New in DashPVA
+
+This file tracks the latest changes, features, and improvements in DashPVA.
+
+---
+
+## Latest Changes (January 2026)
+- Workbench now supports loading compressed datasets for smoother analysis and smaller storage footprints. For legacy files, a converter will be provided to update the file structure so compression loads seamlessly.
+
+## Latest Changes (December 2025)
+Merry Christmas
+
+The Post-Analysis Workbench is a unified workspace that turns raw HKL data into an interactive 3D volume. It keeps 1D line profiles, 2D detector slices, and 3D voxel views in sync—select a point in any view and the others update instantly. This makes it easy to confirm peaks, filter noise in real time, and explore crystal symmetry from every angle.
+
+## Latest Changes (November 2025)
+
+### New Features
+- Selective compression support for large HDF5 datasets using Blosc (LZ4), applied only where it provides clear benefits.
+- Compatible file structure for compressed data, ensuring seamless loading via `utils.hdf5_loader.HDF5Loader`.
+
+### Improvements
+- Faster load times and reduced disk footprint for large arrays written to `/entry/data/data`.
+- Graceful handling when compression plugins are unavailable (falls back to uncompressed writes).
+- Clearer loading messages and error reporting during data import.
+
+### Usage
+- Files produced by `compress.py` can be loaded with:
+  - `HDF5Loader.load_h5_to_3d(path)` for points + intensities
+  - `HDF5Loader.load_h5_volume_3d(path)` for volume workflows
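+
+For example, a minimal sketch of the two calls above (the file path here is hypothetical, and the exact return values are assumptions, so check `utils.hdf5_loader` for the actual signatures):
+
+```python
+from utils.hdf5_loader import HDF5Loader
+
+# Point workflow: HKL points plus their intensities
+points, intensities = HDF5Loader.load_h5_to_3d('dummy/scan_0001.h5')
+
+# Volume workflow: a dense 3D intensity volume
+volume = HDF5Loader.load_h5_volume_3d('dummy/scan_0001.h5')
+```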
+
+## Latest Changes (October 2025)
+
+### New Features
+- Launcher: Force-shutdown dialog now lists all running modules with their process IDs (PIDs) for full visibility before termination.
+- HKL Slice 3D Tool: Added a 2D viewer under Tools to quickly inspect any 3D slice in 2D.
+
+### Bug Fixes and Improvements
+- HKL Range Handling: Adjusted HKL index/range bounds and validation in HKL 3D Viewer and HKL 3D Slicer to ensure accurate limits and improved user feedback.
+- Launcher: Enhanced process tracking with clearer status text and contextual enablement of "Shutdown All".
+
+## Latest Changes (September 2025)
+
+### New Features
+
+#### Command Line Interface (CLI)
+- **NEW**: Introduced unified CLI interface via `dashpva.py`
+  - `python dashpva.py run` - Launch the DashPVA launcher
+  - `python dashpva.py hkl3d` - Launch HKL 3D Viewer
+  - `python dashpva.py slice3d` - Launch HKL 3D Slicer (standalone mode)
+  - `python dashpva.py detector` - Launch Area Detector Viewer
+  - `python dashpva.py setup` - Run PVA workflow setup (with optional `--sim` flag)
+
+#### 3D Visualization Enhancements
+- **NEW**: HKL 3D Slice Window for interactive 3D point cloud visualization
+- **NEW**: Standalone HKL 3D Slicer mode for offline data analysis
+- **ENHANCED**: Loading indicators for 3D parent slice window data loading
+- **ENHANCED**: Configurable reduction factor before loading data
+- **ENHANCED**: Window disabling during data loading operations
+
+#### Data Loading and Processing
+- **NEW**: HDF5 data loading capabilities
+- **NEW**: Slice extraction and analysis tools
+- **NEW**: Interactive 3D visualization with real-time slicing
+- **ENHANCED**: Improved data caching and processing workflows
+
+#### User Interface Improvements
+- **NEW**: SizeManager for automatic and clean window scaling on resize
+- **ENHANCED**: Area detector viewer now includes SizeManager
+- **IMPROVED**: Replaced old font scaling with SizeManager system
+
+#### 2D Slice Viewing in 3D Viewer
+View 3D sliced data in a 2D panel.
+
+### Bug Fixes and Improvements
+
+#### Configuration and Setup
+- **FIXED**: PV simulator server size can now be changed through the GUI
+- **UPDATED**: CLI setup command changed from `sim` to `setup` for the setup dialog
+- **IMPROVED**: Path management for parent directory navigation in the area detector viewer
+
+#### Performance and Stability
+- **OPTIMIZED**: Performance improvements for large dataset handling
+- **ENHANCED**: Better memory management for 3D visualization
+- **IMPROVED**: More responsive UI during data loading operations
+
+#### Code Quality
+- **CLEANUP**: Commented out unused LoadDataHandler and PerformanceDialog utilities
+- **REFACTORED**: Improved code organization and structure
+- **ADDED**: Comprehensive .gitignore file for better repository management
+
+---
+
+## How to Use This File
+
+This changelog follows these conventions:
+- New Features: Major new functionality and capabilities
+- Bug Fixes and Improvements: Fixes, optimizations, and enhancements
+- Documentation: Updates to documentation and guides
+- Breaking Changes: Changes that may affect existing workflows
+
+---
+
+## Getting Started with New Features
+
+### Using the New CLI
+```bash
+# Get help on all available commands
+python dashpva.py --help
+
+# Launch different components
+python dashpva.py setup      # Configure the system
+python dashpva.py detector   # Area detector viewer
+python dashpva.py hkl3d      # 3D visualization
+python dashpva.py slice3d    # Standalone 3D slicer
+```
+
+### 3D Visualization
+The new 3D visualization tools support:
+- Interactive point cloud visualization
+- Real-time slicing and analysis
+- HDF5 file loading
+- Performance optimization for large datasets
+
+For detailed instructions, see [RUN_INSTRUCTIONS.md](RUN_INSTRUCTIONS.md).
+
+---
+
+## Previous Versions
+
+*This is the initial version of the What's New file. Future releases will be documented here.*
diff --git a/README.md b/README.md
index 2ba20dd..bc90168 100644
--- a/README.md
+++ b/README.md
@@ -13,6 +13,9 @@ DashPVA enables distributed analysis and real-time image streaming using PvaPy a
 ## Getting Started
 Follow the instructions in the [RUN_INSTRUCTIONS.md](RUN_INSTRUCTIONS.md) for setup and usage.
 
+## What's New
+Check out [CHANGELOG.md](CHANGELOG.md) for the latest features, improvements, and changes in DashPVA.
+
 # **FAQ: Area Detector and Data Analysis System**
 
 ### _What is the primary function of the software described in the provided sources?_
@@ -37,4 +40,4 @@ Follow the instructions in the [RUN_INSTRUCTIONS.md](RUN_INSTRUCTIONS.md) for se
 - The software heavily relies on EPICS (Experimental Physics and Industrial Control System) for both data acquisition and control of experiment parameters. It uses Channel Access (CA) for monitoring PVs that specify ROIs. The PVAccess (PVA) library is used to subscribe to detector data, and control PVs, and the metadata associated with the detector are also monitored through EPICS. It also is able to read custom PV names from external configuration files.
 
 ### _What is the workflow for setting up and running an analysis?_
-- The workflow begins by configuring the system using a dialog box, including the prefix for the PVs and the address of the data collector. The user then loads a configuration file defining the PVs to be monitored. The user can define ROIs, and open the analysis window that displays the data, allows setting up scan locations through uploaded .npy files, and gives the ability to save the data into HDF5 files. It is also capable of running both simulated and real data.
\ No newline at end of file
+- The workflow begins by configuring the system using a dialog box, including the prefix for the PVs and the address of the data collector. The user then loads a configuration file defining the PVs to be monitored. The user can define ROIs, and open the analysis window that displays the data, allows setting up scan locations through uploaded .npy files, and gives the ability to save the data into HDF5 files. It is also capable of running both simulated and real data.
diff --git a/RUN_INSTRUCTIONS.md b/RUN_INSTRUCTIONS.md
index a0c149b..fbe8a28 100644
--- a/RUN_INSTRUCTIONS.md
+++ b/RUN_INSTRUCTIONS.md
@@ -7,6 +7,117 @@ This guide provides step-by-step instructions to set up, configure, and run the
 ## Setup
 
 ### Install Dependencies
+
+You can install DashPVA dependencies using either **Conda** (recommended for full compatibility) or **UV** (faster installation). Choose the method that best fits your needs.
+
+#### Option 1: Using UV (Fast Installation)
+
+[UV](https://github.com/astral-sh/uv) is an extremely fast Python package installer and resolver written in Rust. It provides much faster dependency resolution and installation compared to traditional pip.
+
+**Prerequisites:**
+- Python 3.11 installed on your system
+
+**Installation Steps:**
+
+1. **Install UV** (if not already installed):
+
+   **Linux/macOS:**
+   ```bash
+   curl -LsSf https://astral.sh/uv/install.sh | sh
+   ```
+
+   After installation, add UV to your PATH:
+   ```bash
+   # For bash/zsh (Linux/macOS)
+   source $HOME/.local/bin/env
+
+   # Or add permanently to ~/.bashrc or ~/.zshrc:
+   export PATH="$HOME/.local/bin:$PATH"
+   ```
+
+   **Windows (PowerShell):**
+   ```powershell
+   powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
+   ```
+
+   After installation, restart your terminal or add to PATH:
+   ```powershell
+   $env:PATH += ";$env:USERPROFILE\.cargo\bin"
+   ```
+
+   **Alternative (all platforms):**
+   ```bash
+   pip install uv
+   ```
+
+2. **Install dependencies** (UV will automatically create a virtual environment):
+   ```bash
+   uv sync
+   ```
+
+   This single command will:
+   - Create a virtual environment (`.venv/`) automatically
+   - Install all dependencies from `pyproject.toml`
+   - Use locked versions from `uv.lock` for reproducible installs
+
+3. **Activate the environment and run the application:**
+
+   **Option A: Activate the virtual environment (traditional way):**
+   ```bash
+   # Linux/macOS
+   source .venv/bin/activate
+
+   # Windows (PowerShell)
+   .venv\Scripts\Activate.ps1
+
+   # Windows (Command Prompt)
+   .venv\Scripts\activate.bat
+   ```
+
+   Then run your commands normally:
+   ```bash
+   python dashpva.py setup
+   ```
+
+   **Option B: Use UV to run commands directly (no activation needed):**
+   ```bash
+   uv run python dashpva.py setup
+   uv run python dashpva.py detector
+   ```
+
+**Note:** All dependencies including `pvapy` (required for PVAccess) are automatically installed via `uv sync`. No conda installation is needed!
+
+**Quick Start Summary:**
+```bash
+# 1. Install UV (one-time setup)
+curl -LsSf https://astral.sh/uv/install.sh | sh  # Linux/macOS
+# OR: pip install uv                             # All platforms
+
+# 2. Install all dependencies (creates .venv automatically)
+uv sync
+
+# 3. Activate and run
+source .venv/bin/activate          # Linux/macOS
+# OR: uv run python dashpva.py setup  # No activation needed
+```
+
+**Verify Installation:**
+```bash
+uv --version
+uv pip list
+```
+
+**Updating Dependencies:**
+```bash
+# Update dependencies and regenerate lock file
+uv lock --upgrade
+
+# Sync with updated dependencies
+uv sync
+```
+
+#### Option 2: Using Conda (Full Compatibility)
+
 Using the [environment.yml](environment.yml) file, you can install the environment using the conda command:
 ```bash
 conda env create -f environment.yml
@@ -28,7 +139,7 @@ Instead of using the `environment.yml` file, you can follow these manual instruc
    pip install pyepics
    ```
 
-### Verify Installation
+**Verify Conda Installation:**
 Ensure all dependencies are installed correctly:
 ```bash
 conda list
 ```
@@ -38,12 +149,21 @@
 ---
 
 ## Running the Application
 
-### 1. Configuration GUI (ConfigDialog)
-The configuration GUI is used to set up detector prefixes, collector addresses, and PV configurations.
+DashPVA now uses a command-line interface (CLI) for launching different components. All commands use the main `dashpva.py` script.
+
+### Available Commands
+
+#### 1. Setup and Configuration
+Set up PVA workflow and configure the system:
 
 **Run Command**:
 ```bash
-python area_det_viewer.py
+python dashpva.py setup
+```
+
+**With Simulator**:
+```bash
+python dashpva.py setup --sim
 ```
 
 **Key Features**:
@@ -51,15 +171,12 @@ python area_det_viewer.py
 - Load, edit, or create PV configuration files.
 - Input caching frequency for live view.
-### 2. Live Viewer GUI (ImageWindow)
-The live image visualization GUI allows users to:
-- Stream live images from a PVA source.
-- View and manipulate regions of interest (ROIs).
-- Monitor statistics for live analysis.
+#### 2. Area Detector Viewer
+Launch the live image visualization GUI:
 
 **Run Command**:
 ```bash
-python area_det_viewer.py
+python dashpva.py detector
 ```
 
 **GUI Features**:
@@ -68,6 +185,272 @@ python area_det_viewer.py
 - **Statistical Monitoring**: View and log key metrics from the live feed.
 - **Frame-by-Frame Processing**: Supports both predetermined and spontaneous scan modes.
 
+#### 3. HKL 3D Viewer
+Launch the interactive 3D visualization tool:
+
+**Run Command**:
+```bash
+python dashpva.py hkl3d
+```
+
+**Features**:
+- Interactive 3D point cloud visualization
+- Real-time data streaming and analysis
+- Integration with PVA data sources
+
+#### 4. HKL 3D Slicer (Standalone)
+Launch the standalone 3D slicer for offline data analysis:
+
+**Run Command**:
+```bash
+python dashpva.py slice3d
+```
+
+**Features**:
+- Interactive 3D visualization with real-time slicing
+- HDF5 data loading capabilities
+- Slice extraction and analysis tools
+- Loading indicators for large datasets
+- Configurable reduction factors for performance optimization
+
+### Quick Reference
+```bash
+# Run the launcher
+python dashpva.py run
+
+# Setup the system
+python dashpva.py setup
+
+# Launch area detector viewer
+python dashpva.py detector
+
+# Launch 3D visualization tools
+python dashpva.py hkl3d
+python dashpva.py slice3d
+
+# Get help on available commands
+python dashpva.py --help
+```
+
+---
+
+## HKL Live Streaming Setup
+
+For HKL (reciprocal space) live streaming and analysis, DashPVA uses a multi-stage pipeline that processes detector images through several consumers before displaying HKL coordinates in real-time.
+
+### Data Flow Pipeline
+
+```
+Detector → Metadata Associator → Collector → RSM Consumer → HKL Viewer
+```
+
+Each stage adds or processes data:
+- **Detector**: Raw image data from area detector
+- **Metadata Associator**: Attaches motor positions and metadata to images
+- **Collector**: Collects and buffers images with metadata
+- **RSM Consumer**: Calculates HKL coordinates from motor positions
+- **HKL Viewer**: Displays 3D HKL visualization
+
+### Configuration Requirements
+
+Before starting HKL streaming, you must configure the TOML file with your beamline-specific PVs.
+
+#### 1. Edit `pv_configs/metadata_pvs.toml`
+
+**A. Set Detector Prefix (Line 2):**
+```toml
+DETECTOR_PREFIX = 'your_beamline:detector_prefix'
+# Example: '11idb:AD1' or '8idb:detector'
+```
+
+**B. Configure Metadata PVs (Lines 24-30):**
+```toml
+[METADATA]
+    [METADATA.CA]
+    # Add your Channel Access PVs (motor positions, etc.)
+    x = 'your_beamline:x_motor_RBV'
+    y = 'your_beamline:y_motor_RBV'
+    # Add any other metadata PVs needed
+
+    [METADATA.PVA]
+    # Add your PVAccess PVs here if any
+```
+
+**C. Configure HKL Section (Lines 83-154):**
+This section is critical for HKL calculations. Update all motor PVs, spec PVs, and detector setup:
+
+```toml
+[HKL]
+    # Sample Circle Motors (typically 4 axes)
+    [HKL.SAMPLE_CIRCLE_AXIS_1]
+    AXIS_NUMBER = 'your_beamline:motor1_RBV:AxisNumber'
+    DIRECTION_AXIS = 'your_beamline:motor1_RBV:DirectionAxis'
+    POSITION = 'your_beamline:motor1_RBV:Position'
+
+    # Repeat for SAMPLE_CIRCLE_AXIS_2, 3, 4
+    # And DETECTOR_CIRCLE_AXIS_1, 2
+
+    [HKL.SPEC]
+    ENERGY_VALUE = 'your_beamline:spec:Energy:Value'
+    UB_MATRIX_VALUE = 'your_beamline:spec:UB_matrix:Value'
+
+    [HKL.DETECTOR_SETUP]
+    CENTER_CHANNEL_PIXEL = 'your_beamline:DetectorSetup:CenterChannelPixel'
+    DISTANCE = 'your_beamline:DetectorSetup:Distance'
+    PIXEL_DIRECTION_1 = 'your_beamline:DetectorSetup:PixelDirection1'
+    PIXEL_DIRECTION_2 = 'your_beamline:DetectorSetup:PixelDirection2'
+    SIZE = 'your_beamline:DetectorSetup:Size'
+    UNITS = 'your_beamline:DetectorSetup:Units'
+```
+
+**Note:** For different beamlines, create a beamline-specific config file:
+```bash
+cp pv_configs/metadata_pvs.toml pv_configs/metadata_pvs_YOUR_BEAMLINE.toml
+```
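+
+To sanity-check the file after editing, you can mirror what the consumers do at configure time: they load the TOML and collect every PV name under `[HKL]`. A minimal sketch, assuming the default config path (this reproduces the `hkl_pv_channels` logic in `HpcRsmProcessor.configure`):
+
+```python
+import toml
+
+config = toml.load('pv_configs/metadata_pvs.toml')
+
+# Collect every PV channel configured under [HKL], the same way the
+# RSM consumer builds its hkl_pv_channels set.
+hkl_pv_channels = set()
+for section in config['HKL'].values():   # every section holds a dict
+    for channel in section.values():     # each value is a PV name string
+        hkl_pv_channels.add(channel)
+
+print(f'{len(hkl_pv_channels)} HKL PVs configured:')
+for channel in sorted(hkl_pv_channels):
+    print(' ', channel)
+```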
+
+### Startup Sequence for HKL Live Streaming
+
+Follow these steps in order to start the complete HKL streaming pipeline:
+
+#### **Terminal 1: Area Detector Viewer (Live View)**
+```bash
+python dashpva.py detector
+```
+
+1. Enter your PVA channel name (e.g., `'11idb:detector:Image'`)
+2. Click "Start Live View"
+3. **Keep this terminal running** - This shows live detector images
+
+**Purpose:** Verify detector is streaming correctly before starting the processing pipeline.
+
+---
+
+#### **Terminal 2: PVA Workflow Setup**
+```bash
+python dashpva.py setup
+```
+
+This opens the PVA Setup Dialog with multiple tabs. Configure each component:
+
+##### **Tab 1: Config Upload**
+1. Click "Browse" and select your `metadata_pvs.toml` file (or beamline-specific version)
+2. The "Current Mode" label will show the caching mode from your config
+3. **This config file will be used by all consumers**
+
+##### **Tab 2: Metadata Associator**
+This consumer attaches metadata (motor positions, etc.) to detector images.
+
+**Configuration:**
+- **Input Channel**: Your detector PVA channel (e.g., `'11idb:detector:Image'`)
+- **Output Channel**: Where associator sends data (e.g., `'processor:associator:output'`)
+- **Control Channel**: `'processor:*:control'` (default)
+- **Status Channel**: `'processor:*:status'` (default)
+- **Processor File**: `consumers/hpc_metadata_consumer.py`
+- **Processor Class**: `HpcAdMetadataProcessor`
+- **Report Period**: `5` (seconds, default)
+- **Server Queue Size**: `100` (default)
+- **N Consumers**: `1` (default)
+- **Distributor Updates**: `10` (default)
+
+**Action:** Click **"Run Associator Consumers"**
+
+**What it does:** Reads PVs from `[METADATA]` and `[HKL]` sections of your TOML file and attaches their values to each detector image frame.
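+
+To confirm the associator is actually attaching metadata, you can monitor its output channel and list the attribute names on each frame. A minimal sketch, using the same `pvaccess` monitor pattern as `consumers/pv_monitor_from_collector.py` (the channel name follows the example above):
+
+```python
+import time
+import pvaccess as pva
+
+def on_frame(frame):
+    # Each NTND attribute is a dict with at least a 'name' key
+    names = [attr['name'] for attr in frame['attribute']]
+    print(f"uniqueId={frame['uniqueId']}, attributes: {names}")
+
+channel = pva.Channel('processor:associator:output', pva.PVA)
+channel.subscribe('monitor', on_frame)
+channel.startMonitor()
+time.sleep(10)          # watch a few frames
+channel.stopMonitor()
+```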
+
+---
+
+##### **Tab 3: Collector**
+This consumer collects and buffers images with attached metadata.
+
+**Configuration:**
+- **Collector ID**: `1` (default)
+- **Producer ID List**: `1` (default, comma-separated if multiple)
+- **Input Channel**: Same as Associator Output Channel (e.g., `'processor:associator:output'`)
+- **Output Channel**: Where collector sends data (e.g., `'processor:collector:output'`)
+- **Control Channel**: `'processor:*:control'` (default)
+- **Status Channel**: `'processor:*:status'` (default)
+- **Processor File**: `consumers/hpc_passthrough_consumer.py`
+- **Processor Class**: `HpcPassthroughProcessor`
+- **Report Period**: `5` (seconds, default)
+- **Server Queue Size**: `100` (default)
+- **Collector Cache Size**: `1000` (default)
+
+**Action:** Click **"Run Collector"**
+
+**What it does:** Collects images with metadata from the associator and forwards them to the next stage.
+
+---
+
+##### **Tab 4: Analysis Consumer (RSM Consumer)**
+This consumer calculates HKL coordinates from motor positions.
+
+**Configuration:**
+- **Input Channel**: Same as Collector Output Channel (e.g., `'processor:collector:output'`)
+- **Output Channel**: Where RSM data goes (e.g., `'processor:rsm:output'`)
+- **Control Channel**: `'processor:*:control'` (default)
+- **Status Channel**: `'processor:*:status'` (default)
+- **Processor File**: `consumers/hpc_rsm_consumer.py`
+- **Processor Class**: `HpcRsmProcessor`
+- **Report Period**: `5` (seconds, default)
+- **Server Queue Size**: `100` (default)
+- **N Consumers**: `1` (default)
+- **Distributor Updates**: `10` (default)
+
+**Action:** Click **"Run Analysis Consumer"**
+
+**What it does:**
+- Reads motor positions from the `[HKL]` section of your TOML file
+- Calculates reciprocal space (HKL) coordinates using xrayutilities
+- Outputs HKL data (qx, qy, qz) for visualization
+
+---
+
+#### **Terminal 3: HKL 3D Viewer**
+```bash
+python dashpva.py hkl3d
+```
+
+1. **Input Channel**: Enter the RSM Consumer Output Channel (e.g., `'processor:rsm:output'`)
+2. **Config File**: Browse and select your `metadata_pvs.toml` file
+3. Click **"Start Live View"**
+
+**What it does:** Displays real-time 3D HKL visualization with point cloud data streaming from the RSM consumer.
+
+---
+
+### Important Notes
+
+1. **Channel Names Must Match**: The output channel of one component must match the input channel of the next (a verification sketch follows these notes):
+   - Associator Output → Collector Input
+   - Collector Output → RSM Consumer Input
+   - RSM Consumer Output → HKL Viewer Input
+
+2. **TOML File is Critical**:
+   - The Metadata Associator reads PVs from `[METADATA]` and `[HKL]` sections
+   - The RSM Consumer uses `[HKL]` section PVs to calculate HKL coordinates
+   - All motor PVs must be correctly specified in the `[HKL]` section
+
+3. **Startup Order Matters**:
+   - Start Detector Viewer first (to verify detector is working)
+   - Then start PVA Setup and launch consumers in order: Associator → Collector → RSM Consumer
+   - Finally, start HKL Viewer
+
+4. **For Different Beamlines**:
+   - Create a beamline-specific TOML config file
+   - Update all PV names to match your beamline's EPICS PVs
+   - The same startup sequence applies, just use your beamline's config file
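+
+A quick way to verify the whole chain is wired correctly is to monitor the RSM consumer's output and check that an `RSM` attribute arrives. A minimal sketch (channel name as configured above, same monitor pattern as the associator check earlier):
+
+```python
+import time
+import pvaccess as pva
+
+def on_frame(frame):
+    # The RSM consumer appends an attribute named 'RSM' to each frame
+    has_rsm = any(attr['name'] == 'RSM' for attr in frame['attribute'])
+    print(f"uniqueId={frame['uniqueId']}, RSM attached: {has_rsm}")
+
+channel = pva.Channel('processor:rsm:output', pva.PVA)
+channel.subscribe('monitor', on_frame)
+channel.startMonitor()
+time.sleep(10)
+channel.stopMonitor()
+```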
+
+### Quick Checklist
+
+Before starting HKL streaming, ensure:
+
+- [ ] TOML config file has correct `DETECTOR_PREFIX`
+- [ ] `[METADATA]` section has your metadata PVs
+- [ ] `[HKL]` section has all motor PVs (sample circle, detector circle)
+- [ ] `[HKL.SPEC]` section has energy and UB matrix PVs
+- [ ] `[HKL.DETECTOR_SETUP]` section has detector geometry PVs
+- [ ] PVA channel names are consistent across all components
+- [ ] All consumers are started in the correct order
+
 ---
 
 ## Configuration Files
@@ -159,8 +542,50 @@ To use a custom configuration, load the file through the ConfigDialog GUI or pla
 - Verify `.ui` files (e.g., `imageshow.ui`) exist in the `gui/` folder.
 - Ensure correct paths for configuration files.
 
+### EPICS Database Definition (DBD) Directory Not Found
+
+If you encounter the error:
+```
+Cannot find dbd directory, please set EPICS_DB_INCLUDE_PATH environment variable to use CA metadata PVs.
+```
+
+This occurs when using CA (Channel Access) metadata PVs in the collector testing script. The script needs to find EPICS database definition files.
+
+**Solution 1: Set EPICS_DB_INCLUDE_PATH manually**
+
+Find your EPICS base installation and set the environment variable:
+
+```bash
+# For APS systems, EPICS base is typically at:
+export EPICS_DB_INCLUDE_PATH=/APSshare/epics/base-7.0.8/dbd
+
+# Or if using conda-installed pvapy:
+# Find where pvapy is installed, then look for dbd directory
+# Usually: $CONDA_PREFIX/share/epics/dbd or similar
+
+# To find it automatically:
+python -c "import pvaccess as pva; import os; print(os.path.dirname(pva.__file__))"
+# Then navigate to the dbd directory relative to that location
+```
+
+**Solution 2: Add to your shell configuration**
+
+Add to `~/.bashrc` or `~/.zshrc`:
+```bash
+export EPICS_DB_INCLUDE_PATH=/APSshare/epics/base-7.0.8/dbd
+```
+
+**Solution 3: Use PVA metadata instead of CA**
+
+If you don't need CA metadata, use PVA metadata instead:
+```bash
+# Instead of: -mpv ca://x,ca://y
+# Use: -mpv pva://x,pva://y
+```
+
+**Note:** The script will attempt to auto-detect the dbd directory, but if `pvData` library cannot be found, you must set `EPICS_DB_INCLUDE_PATH` manually.
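+
+The auto-detection derives the dbd path from the location of the `pvData` shared library. A sketch of the same lookup idea used in `consumers/sim_rsm_data.py` (note that `find_library` may return only a bare soname on some platforms, in which case set the variable manually):
+
+```python
+import os
+import ctypes.util
+
+# Locate the EPICS pvData shared library shipped with pvapy
+pvDataLib = ctypes.util.find_library('pvData')
+if pvDataLib:
+    epicsLibDir = os.path.dirname(pvDataLib)
+    dbdDir = os.path.realpath('%s/../../dbd' % epicsLibDir)
+    print(f'export EPICS_DB_INCLUDE_PATH={dbdDir}')
+else:
+    print('pvData library not found; set EPICS_DB_INCLUDE_PATH manually')
+```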
 ---
 
 ## Need Help?
 Refer to the [README.md](README.md) for an overview of the project or contact the repository maintainer for assistance.
-
diff --git a/consumers/ad_sim_server_modified.py b/consumers/ad_sim_server_modified.py
index e438885..f1b6180 100755
--- a/consumers/ad_sim_server_modified.py
+++ b/consumers/ad_sim_server_modified.py
@@ -251,6 +251,7 @@ def __init__(self, nf, nx, ny, colorMode, datatype, minimum, maximum):
         self.maximum = maximum
         self.x_positions, self.y_positions = self.generate_raster_scan_positions(size=self.nscans)
         self.scan_gen_instance = self.scan_gen(self.x_positions, self.y_positions)
+        print(f'x_positions: {self.x_positions}')
         self.generateFrames()
         np.save('xpos.npy', self.x_positions)
         np.save('ypos.npy', self.y_positions)
@@ -260,7 +261,8 @@ def gaussian_2d(self, x, y, x0, y0, sigma_x, sigma_y, total_intensity):
         return total_intensity * np.exp(-((x - x0)**2 / (2 * sigma_x**2) + (y - y0)**2 / (2 * sigma_y**2)))
 
     def generate_gaussian_peak_array(self,shift_x, shift_y):
-        size=1024
+        sizex = self.nx
+        sizey = self.ny
         sigma_x = 14
         sigma_y = 20
         freq_x = 90
@@ -269,12 +271,12 @@ def generate_gaussian_peak_array(self,shift_x, shift_y):
         freq_y = 80
         # shift_y = 0
         total_intensity = 200
         #array = np.zeros((size, size))
-        x = np.linspace(0, size - 1, size)
-        y = np.linspace(0, int(size/2) - 1, int(size/2))
+        x = np.linspace(0, sizex - 1, sizex)
+        y = np.linspace(0, sizey - 1, sizey)
         x_grid, y_grid = np.meshgrid(x, y)
-        x0 = (size / 2) * (1 + np.sin(2 * np.pi * freq_x * (x_grid+ shift_x) / size))
-        y0 = (size / 2) * (1 + np.sin(2 * np.pi * freq_y * (y_grid+ shift_y) / size))
+        x0 = (sizex / 2) * (1 + np.sin(2 * np.pi * freq_x * (x_grid+ shift_x) / sizex))
+        y0 = (sizey / 2) * (1 + np.sin(2 * np.pi * freq_y * (y_grid+ shift_y) / sizey))
         # for i in range(size):
         #     for j in range(size):
         #         array[i, j] = gaussian_2d(x_grid[i, j], y_grid[i, j], x0[i, j], y0[i, j], sigma_x, sigma_y, total_intensity)
diff --git a/consumers/hpc_metadata_consumer.py b/consumers/hpc_metadata_consumer.py
index 303438e..099c8e6 100755
--- a/consumers/hpc_metadata_consumer.py
+++ b/consumers/hpc_metadata_consumer.py
@@ -1,18 +1,29 @@
 import time
+import copy
 import numpy as np
 import pvaccess as pva
 from pvapy.hpc.adImageProcessor import AdImageProcessor
 from pvapy.utility.floatWithUnits import FloatWithUnits
 from pvapy.utility.timeUtility import TimeUtility
+import sys
+import os
+# Add the parent directory to the Python path to find utils module
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import logging
 
+# COPIED FROM hpc_rsm_consumer.py - Compression libraries
+import bitshuffle
+import blosc2
+import lz4.block
+import toml
+
 # Example AD Metadata Processor for the streaming framework
 # Updates image attributes with values from metadata channels
 class HpcAdMetadataProcessor(AdImageProcessor):
 
     # Acceptable difference between image timestamp and metadata timestamp
     DEFAULT_TIMESTAMP_TOLERANCE = 0.001
-
+    MIN_COMPRESS_BYTES = 4098
     # Offset that will be applied to metadata timestamp before comparing it with
     # the image timestamp
     DEFAULT_METADATA_TIMESTAMP_OFFSET = .001
@@ -31,6 +42,8 @@ def __init__(self, configDict={}):
        self.nMetadataProcessed = 0 # Number of metadata values associated with images
        self.nMetadataDiscarded = 0 # Number of metadata values that were discarded
        self.processingTime = 0
+       self.processor_id = configDict.get('collectorId') if 'collectorId' in configDict else configDict.get('metadataId', None)
+       self.cd = None
 
        # Current metadata map
        self.currentMetadataMap = {}
@@ -39,12 +52,59 @@ def __init__(self, configDict={}):
        # The last object time
        self.lastFrameTimestamp = 0
 
+       # COPIED FROM hpc_rsm_consumer.py - Type mapping for compression
+       self.CODEC_PARAMETERS_MAP = {
+           np.dtype('uint8'): pva.UBYTE,
+           np.dtype('int8'): pva.BYTE,
+           np.dtype('uint16'): pva.USHORT,
+           np.dtype('int16'): pva.SHORT,
+           np.dtype('uint32'): pva.UINT,
+           np.dtype('int32'): pva.INT,
+           np.dtype('uint64'): pva.ULONG,
+           np.dtype('int64'): pva.LONG,
+           np.dtype('float32'): pva.FLOAT,
+           np.dtype('float64'): pva.DOUBLE,
+       }
+
+       # COPIED FROM hpc_rsm_consumer.py - HKL parameters
+       self.all_attributes = {}
+       self.hkl_pv_channels = set()
+       self.hkl_attributes = {}
+       self.hkl_config = None
+       self.config = None
+       self.old_hkl_attributes = None
+
        self.logger.debug(f'Created HpcAdMetadataProcessor')
-       # self.logger.setLevel(logging.DEBUG) # Set the logger level to DEBUG
-
+       self.logger.setLevel(logging.DEBUG) # Set the logger level to DEBUG
+
+    # COPIED FROM hpc_rsm_consumer.py - Array compression method
+    def compress_array(self, hkl_array: np.ndarray, codec_name: str) -> np.ndarray:
+
+        if not isinstance(hkl_array, np.ndarray):
+            raise TypeError("hkl_array must be a numpy array")
+        if hkl_array.ndim != 1:
+            raise ValueError("hkl_array must be a 1D numpy array")
+        byte_data = hkl_array.tobytes()
+        typesize = hkl_array.dtype.itemsize
+
+        if codec_name == 'lz4':
+            compressed = lz4.block.compress(byte_data, store_size=False)
+        elif codec_name == 'bslz4':
+            compressed = bitshuffle.compress_lz4(hkl_array)
+        elif codec_name == 'blosc':
+            compressed = blosc2.compress(
+                byte_data,
+                typesize=typesize
+            )
+        else:
+            raise ValueError(f"Unsupported codec: {codec_name}")
+
+        # Convert compressed bytes to a uint8 numpy array
+        return np.frombuffer(compressed, dtype=np.uint8)
 
     # Configure user processor
     def configure(self, configDict):
+        self.cd = configDict
         self.logger.debug(f'Configuration update: {configDict}')
         if 'timestampTolerance' in configDict:
             self.timestampTolerance = float(configDict.get('timestampTolerance'))
@@ -52,6 +112,23 @@
         if 'metadataTimestampOffset' in configDict:
             self.metadataTimestampOffset = float(configDict.get('metadataTimestampOffset'))
             self.logger.debug(f'Updated metadata timestamp offset: {self.metadataTimestampOffset} seconds')
+
+        # COPIED FROM hpc_rsm_consumer.py - HKL configuration setup
+        if 'path' in configDict:
+            self.path = configDict["path"]
+            with open(self.path, "r") as config_file:
+                self.config = toml.load(config_file)
+
+            if 'HKL' in self.config:
+                self.hkl_config : dict = self.config['HKL']
+                for section in self.hkl_config.values():  # every section holds a dict
+                    for channel in section.values():  # the values of each section are the PV name strings
+                        self.hkl_pv_channels.add(channel)
+
+        with open('error_output.txt','w') as f:
+            f.write(str(configDict))
+        self.logger.debug(configDict)
+        #self.processor_id = configDict.get('collectorId') if 'collectorId' in configDict else configDict.get('metadataId', None)
 
     # Associate metadata
     # Returns true on success, false on definite failure, none on failure/try another
@@ -89,16 +166,20 @@ def associateMetadata(self, mdChannel, frameId, frameTimestamp, frameAttributes):
                 pv = pva.PvScalarArray(pva.DOUBLE)
                 pv.set(mdValue.tolist())
                 nt_attribute = {'name': mdChannel, 'value': pv}
-            except ValueError:
-                self.logger.error(f"Failed to set ndAttribute {mdChannel}: {mdValue}")
+            elif isinstance(mdValue, bool):
+                nt_attribute = {'name':mdChannel, 'value': pva.PvBoolean(mdValue)}
+            else:
+                raise ValueError(f'Failed to create metadata attribute: {mdChannel}: {mdValue}')
+            frameAttributes.append(nt_attribute)
+        except Exception as e:
self.logger.error(f"[Metadata Associator] Error associatating metadata {e}") return False diff = abs(frameTimestamp - mdTimestamp2) self.logger.debug(f'Metadata {mdChannel} has value of {mdValue}, timestamp: {mdTimestamp} (with offset: {mdTimestamp2}), timestamp diff: {diff}') # Here is where any logic with time offsets would go # Attach Metadata no matter what - frameAttributes.append(nt_attribute) self.nMetadataProcessed += 1 return True @@ -161,8 +242,6 @@ def process(self, pvObject): # Definite failure associationFailed = True break - - # #debug # if 'attribute' in pvObject: # frameAttributes = pvObject['attribute'] # print(f"DEBUG !! Original frame attributes: {frameAttributes}") @@ -171,15 +250,103 @@ def process(self, pvObject): self.nFrameErrors += 1 else: self.nFramesProcessed += 1 - - pvObject['attribute'] = frameAttributes + + #pvObject['attribute'] = frameAttributes + proc_time_start = pva.PvObject({'value': pva.DOUBLE}) + proc_time_start['value'] = t0 # seconds, or multiply by 1000.0 for ms + frameAttributes.append({ + 'name': f'procTimeStart_{self.__class__.__name__}{self.processor_id}' , + 'value': proc_time_start + }) + proc_time_end = pva.PvObject({'value': pva.DOUBLE}) + proc_time_end['value'] = time.time() # seconds, or multiply by 1000.0 for ms + frameAttributes.append({ + 'name': f'procTimeEnd_{self.__class__.__name__}{self.processor_id}', + 'value': proc_time_end + }) + proc_time = pva.PvObject({'value': pva.DOUBLE}) + proc_time['value'] = (time.time() - t0) # seconds, or multiply by 1000.0 for ms + frameAttributes.append({ + 'name': f'procTime_{self.__class__.__name__}{self.processor_id}', + 'value': proc_time + }) + + + self.compress_image(pvObject) + + pvObject['attribute'] = frameAttributes self.updateOutputChannel(pvObject) self.lastFrameTimestamp = frameTimestamp t1 = time.time() self.processingTime += (t1-t0) return pvObject + + def compress_image(self, pvObject) -> None: + # Original bytes: 2097152, Compressed bytes: 55418, Codec: lz4, Image size 1024x1024 + # Ratio 600 : 16 + # If already compressed, do nothing + try: + codec_name = pvObject['codec']['name'] + except Exception: + codec_name = '' + if codec_name: + return + + # Extract active union field and its array + union_dict = pvObject['value'][0] + field_name = next(iter(union_dict)) + pv_arr = union_dict[field_name] + data_list = pv_arr.get() if hasattr(pv_arr, 'get') else pv_arr + + # Map union field to numpy dtype + UNION_FIELD_TO_DTYPE = { + 'ubyteValue': np.uint8, + 'byteValue': np.int8, + 'ushortValue': np.uint16, + 'shortValue': np.int16, + 'uintValue': np.uint32, + 'intValue': np.int32, + 'ulongValue': np.uint64, + 'longValue': np.int64, + 'floatValue': np.float32, + 'doubleValue': np.float64, + } + dtype = UNION_FIELD_TO_DTYPE.get(field_name, None) + arr = np.asarray(data_list, dtype=dtype) if dtype is not None else np.asarray(data_list) + arr_c = np.ascontiguousarray(arr) + raw = arr_c.tobytes() + raw_len = arr_c.nbytes + + original_enum = self.CODEC_PARAMETERS_MAP.get(arr_c.dtype, None) + + # Compress and decide + if raw_len >= self.MIN_COMPRESS_BYTES: + comp = lz4.block.compress(raw, store_size=False) + if len(comp) < raw_len: + comp_data, codec = comp, 'lz4' + else: + comp_data, codec = raw, 'none' + else: + comp_data, codec = raw, 'none' + + if codec == 'lz4': + # Compressed path: put bytes under UBYTE union branch + arr_u8 = np.frombuffer(comp_data, dtype=np.uint8) + # PvAccess expects a list for union array values + pvObject['value'] = ({'ubyteValue': arr_u8.tolist()},) + 
+            pvObject['codec']['name'] = 'lz4'
+            pvObject['codec']['parameters'] = ({'value': int(original_enum)},) if original_enum is not None else ()
+            pvObject['uncompressedSize'] = raw_len
+        else:
+            # Leave original branch and clear codec
+            pvObject['codec']['name'] = ''
+            pvObject['codec']['parameters'] = ({'value': int(original_enum)},) if original_enum is not None else ()
+            pvObject['uncompressedSize'] = raw_len
+        # Debug
+        #msg = f"Original bytes: {raw_len}, Compressed bytes: {len(comp_data)}, Codec: {codec}" * 10
+        #print(msg)
 
-    # Reset statistics for user processor
     def resetStats(self):
         self.nFramesProcessed = 0
         self.nFrameErrors = 0
@@ -201,7 +368,7 @@ def getStats(self):
             'nMetadataDiscarded' : self.nMetadataDiscarded,
             'processingTime' : FloatWithUnits(self.processingTime, 's'),
             'processedFrameRate' : FloatWithUnits(processedFrameRate, 'fps'),
-            'frameErrorRate' : FloatWithUnits(frameErrorRate, 'fps')
+            'frameErrorRate' : FloatWithUnits(frameErrorRate, 'fps'),
         }
 
     # Define PVA types for different stats variables
@@ -213,5 +380,4 @@ def getStatsPvaTypes(self):
             'nMetadataDiscarded' : pva.UINT,
             'processingTime' : pva.DOUBLE,
             'processedFrameRate' : pva.DOUBLE,
             'frameErrorRate' : pva.DOUBLE
         }
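The two consumers form a handshake: `compress_image()` above writes lz4 bytes with `store_size=False` and records `uncompressedSize`, and `decompress_image()` in the RSM consumer below feeds that size back to `lz4.block.decompress`. A standalone round-trip sketch of that convention (stand-in frame data; not part of the patch):

```python
import numpy as np
import lz4.block

frame = np.arange(512 * 512, dtype=np.uint16)   # stand-in detector frame
raw = frame.tobytes()

# Producer side (compress_image): the size is NOT stored in the stream
comp = lz4.block.compress(raw, store_size=False)

# Consumer side (decompress_image): the size must be supplied explicitly
out = lz4.block.decompress(comp, uncompressed_size=len(raw))
restored = np.frombuffer(out, dtype=np.uint16)

assert np.array_equal(frame, restored)
print(f'{len(raw)} bytes -> {len(comp)} compressed')
```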
diff --git a/consumers/hpc_rsm_consumer.py b/consumers/hpc_rsm_consumer.py
index 9a10f4d..1ac636d 100644
--- a/consumers/hpc_rsm_consumer.py
+++ b/consumers/hpc_rsm_consumer.py
@@ -1,182 +1,494 @@
+import time
+import copy
+import toml
+import bitshuffle
+import blosc2
+import lz4.block
 import numpy as np
 import pvaccess as pva
-from pvaccess import PvObject
+from pvaccess import PvObject, NtAttribute
 from pvapy.hpc.adImageProcessor import AdImageProcessor
 from pvapy.utility.floatWithUnits import FloatWithUnits
-import xrayutilities as xu
-import time
+import xrayutilities as xu
+from pvapy.utility.timeUtility import TimeUtility
+# logging
+import traceback
 
 class HpcRsmProcessor(AdImageProcessor):
+
     def __init__(self, configDict={}):
         super(HpcRsmProcessor, self).__init__(configDict)
+
+        # Config Variables
+        self.path = None
+        self.hkl_config = None
 
         # Statistics
         self.nFramesProcessed = 0
         self.nFrameErrors = 0
+        self.nMetadataProcessed = 0
+        self.nMetadataDiscarded = 0
         self.processingTime = 0
+
+        # Type Mapping
+        self.CODEC_PARAMETERS_MAP = {
+            np.dtype('uint8'): pva.UBYTE,
+            np.dtype('int8'): pva.BYTE,
+            np.dtype('uint16'): pva.USHORT,
+            np.dtype('int16'): pva.SHORT,
+            np.dtype('uint32'): pva.UINT,
+            np.dtype('int32'): pva.INT,
+            np.dtype('uint64'): pva.ULONG,
+            np.dtype('int64'): pva.LONG,
+            np.dtype('float32'): pva.FLOAT,
+            np.dtype('float64'): pva.DOUBLE,
+
+        }
+
+        # Reverse mapping from PVA codec enum to numpy dtype for decompression
+        self.PVA_TO_NUMPY_DTYPE_MAP = {
+            pva.UBYTE: np.uint8,
+            pva.BYTE: np.int8,
+            pva.USHORT: np.uint16,
+            pva.SHORT: np.int16,
+            pva.UINT: np.uint32,
+            pva.INT: np.int32,
+            pva.ULONG: np.uint64,
+            pva.LONG: np.int64,
+            pva.FLOAT: np.float32,
+            pva.DOUBLE: np.float64,
+        }
+
+        # Mapping from union field name to numpy dtype for uncompressed payloads
+        self.UNION_FIELD_TO_DTYPE = {
+            'ubyteValue': np.uint8,
+            'byteValue': np.int8,
+            'ushortValue': np.uint16,
+            'shortValue': np.int16,
+            'uintValue': np.uint32,
+            'intValue': np.int32,
+            'ulongValue': np.uint64,
+            'longValue': np.int64,
+            'floatValue': np.float32,
+            'doubleValue': np.float64,
+        }
+
+        # PV attributes
+        self.shape : tuple = (0,0)
+        self.type_dict = {
+            'codec':{
+                'name': pva.STRING,
+                'parameters': pva.INT},
+            'qx': {
+                'compressedSize': pva.LONG,
+                'uncompressedSize': pva.LONG,
+                'value':[pva.DOUBLE]},
+            'qy': {
+                'compressedSize': pva.LONG,
+                'uncompressedSize': pva.LONG,
+                'value':[pva.DOUBLE]},
+            'qz': {
+                'compressedSize': pva.LONG,
+                'uncompressedSize': pva.LONG,
+                'value':[pva.DOUBLE]}
+        }
+
+        self.type_dict_compressed = {
+            'codec':{
+                'name': pva.STRING,
+                'parameters': pva.INT},
+            'qx': {
+                'compressedSize': pva.LONG,
+                'uncompressedSize': pva.LONG,
+                'value':[pva.UBYTE,]},
+            'qy': {
+                'compressedSize': pva.LONG,
+                'uncompressedSize': pva.LONG,
+                'value':[pva.UBYTE,]},
+            'qz': {
+                'compressedSize': pva.LONG,
+                'uncompressedSize': pva.LONG,
+                'value':[pva.UBYTE,]}
+        }
 
         # HKL parameters
-        self.hkl_data = None
+        self.all_attributes = {}
+        self.hkl_pv_channels = set()
+        self.hkl_attributes = {}
+        self.old_attributes : dict = None
         self.q_conv = None
-        self.shape = None
         self.qx = None
         self.qy = None
-        self.qz = None
-
-    # Configure from dictionary
+        self.qz = None
+
+        self.configure(configDict)
 
     def configure(self, configDict):
         """Configure processor settings and initialize HKL parameters"""
         self.logger.debug(f'Configuration update: {configDict}')
+
+        if 'path' in configDict:
+            self.path = configDict["path"]
+            with open(self.path, "r") as config_file:
+                self.config = toml.load(config_file)
+
+            if 'HKL' in self.config:
+                self.hkl_config : dict = self.config['HKL']
+                for section in self.hkl_config.values():  # every section holds a dict
+                    for channel in section.values():  # the values of each section are the PV name strings
+                        self.hkl_pv_channels.add(channel)
+
+    def parse_hkl_ndattributes(self, pva_object):
+        """
+        Parse the NDAttributes from the PVA Object into a python dict.
+        Store attributes in self.all_attributes for easy reference.
+        """
+        if pva_object is None:
+            return
+        # obj_dict : dict = pva_object.get()
+        attributes : list = pva_object['attribute']
+        hkl_attributes = {}
+        for attr in attributes:  # list of attribute dictionaries
+            name = attr['name']
+            value = attr['value'][0]['value']
+            self.all_attributes[name] = value
+            if name in self.hkl_pv_channels:
+                hkl_attributes[name] = value
+        return hkl_attributes
+
+    def get_sample_and_detector_circles(self, hkl_attr: dict):
+        # lists for sample circle parameters
+        sample_circle_directions = []
+        sample_circle_positions = []
+        # lists for detector circles
+        det_circle_directions = []
+        det_circle_positions = []
+
+        if len(hkl_attr) == len(self.hkl_pv_channels):
+            # loop sorting pv channels
+            for section, pv_dict in self.hkl_config.items():
+                if section.startswith('SAMPLE_CIRCLE'):
+                    for pv_name in pv_dict.values():
+                        if pv_name.endswith('DirectionAxis'):
+                            sample_circle_directions.append(hkl_attr[pv_name])
+                        elif pv_name.endswith('Position'):
+                            sample_circle_positions.append(hkl_attr[pv_name])
+                elif section.startswith('DETECTOR_CIRCLE'):
+                    for pv_name in pv_dict.values():
+                        if pv_name.endswith('DirectionAxis'):
+                            det_circle_directions.append(hkl_attr[pv_name])
+                        elif pv_name.endswith('Position'):
+                            det_circle_positions.append(hkl_attr[pv_name])
+
+        return sample_circle_directions, sample_circle_positions, det_circle_directions, det_circle_positions
+
+    def get_axis_directions(self, hkl_attr: dict):
+        # Get beam and reference directions
+        if len(hkl_attr) == len(self.hkl_pv_channels):
+            primary_beam_directions = [hkl_attr.get(f'PrimaryBeamDirection:AxisNumber{i}', None) for i in range(1,4)]
+            inplane_beam_direction = [hkl_attr.get(f'InplaneReferenceDirection:AxisNumber{i}', None) for i in range(1,4)]
+            sample_surface_normal_direction = [hkl_attr.get(f'SampleSurfaceNormalDirection:AxisNumber{i}', None) for i in range(1,4)]
+
+            return primary_beam_directions, inplane_beam_direction, sample_surface_normal_direction
+        else:
+            return None, None, None
 
-        if "HKL" in configDict:
-            self.hkl_data = configDict["HKL"]
-            self.init_hkl()
-
-    def init_hkl(self):
-        """Initialize HKL parameters from config"""
-        if self.hkl_data:
-            # Get sample circle parameters
-            sample_circle_keys = [key for key in self.hkl_data.keys() if key.startswith('SampleCircle')]
-            self.sample_circle_directions = []
-            self.sample_circle_positions = []
-            for key in sample_circle_keys:
-                self.sample_circle_directions.append(self.hkl_data[key]['DirectionAxis'])
-                self.sample_circle_positions.append(self.hkl_data[key]['Position'])
-
-            # Get detector circle parameters
-            det_circle_keys = [key for key in self.hkl_data.keys() if key.startswith('DetectorCircle')]
-            self.det_circle_directions = []
-            self.det_circle_positions = []
-            for key in det_circle_keys:
-                self.det_circle_directions.append(self.hkl_data[key]['DirectionAxis'])
-                self.det_circle_positions.append(self.hkl_data[key]['Position'])
-
-            # Get beam and reference directions
-            self.primary_beam_directions = [self.hkl_data['PrimaryBeamDirection'][axis] for axis in self.hkl_data['PrimaryBeamDirection'].keys()]
-            self.inplane_beam_direction = [self.hkl_data['InplaneReferenceDirection'][axis] for axis in self.hkl_data['InplaneReferenceDirection'].keys()]
-            self.sample_surface_normal_direction = [self.hkl_data['SampleSurfaceNormalDirection'][axis] for axis in self.hkl_data['SampleSurfaceNormalDirection'].keys()]
+    def get_ub_matrix(self, hkl_attr: dict):
+        ub_matrix_key = self.hkl_config['SPEC'].get('UB_MATRIX_VALUE', '')
+
+        return hkl_attr[ub_matrix_key]
+
+    def get_energy(self, hkl_attr: dict):
+        energy_key = self.hkl_config['SPEC'].get('ENERGY_VALUE', '')
+
+        return hkl_attr[energy_key]
+
+    def create_rsm(self, hkl_attr: dict, shape: tuple):
+        """Calculate reciprocal space mapping"""
+        try:
+            # get Sample and Detection Circle positions and directions from hkl attributes
+            sample_circle_directions, sample_circle_positions, det_circle_directions, det_circle_positions = self.get_sample_and_detector_circles(hkl_attr)
+            # get all axis directions for primary beam, inplane beam, and sample surface normal from hkl attributes
+            primary_beam_directions, inplane_beam_direction, sample_surface_normal_direction = self.get_axis_directions(hkl_attr)
+            # get UB matrix and energy
+            ub_matrix = self.get_ub_matrix(hkl_attr)
+            ub_matrix = np.reshape(ub_matrix, (3,3))
+            energy = self.get_energy(hkl_attr) * 1000
 
             # Initialize QConversion
-            self.q_conv = xu.experiment.QConversion(
-                self.sample_circle_directions,
-                self.det_circle_directions,
-                self.primary_beam_directions
+            q_conv = xu.experiment.QConversion(
+                sample_circle_directions,
+                det_circle_directions,
+                primary_beam_directions
             )
+            # Initialize HXRD
+            hxrd = xu.HXRD(inplane_beam_direction,
+                           sample_surface_normal_direction,
+                           en=energy,
+                           qconv=q_conv)
+
+            # Set up detector parameters
+            roi = [0, shape[0], 0, shape[1]]
+            pixel_dir1 = hkl_attr['DetectorSetup:PixelDirection1']
+            pixel_dir2 = hkl_attr['DetectorSetup:PixelDirection2']
+            cch1 = hkl_attr['DetectorSetup:CenterChannelPixel'][0]
+            cch2 = hkl_attr['DetectorSetup:CenterChannelPixel'][1]
+            nch1 = shape[0]
+            nch2 = shape[1]
+            pixel_width1 = hkl_attr['DetectorSetup:Size'][0] / nch1
+            pixel_width2 = hkl_attr['DetectorSetup:Size'][1] / nch2
+            distance = hkl_attr['DetectorSetup:Distance']
 
-            # Get UB matrix and energy
-            self.ub_matrix = np.reshape(self.hkl_data['UBMatrix']['Value'], (3,3))
-            self.energy = self.hkl_data['Energy']['Value'] * 1000
+            hxrd.Ang2Q.init_area(
+                pixel_dir1, pixel_dir2,
+                cch1=cch1, cch2=cch2,
+                Nch1=nch1, Nch2=nch2,
+                pwidth1=pixel_width1,
+                pwidth2=pixel_width2,
+                distance=distance,
+                roi=roi
+            )
 
-    def create_rsm(self, shape):
-        """Calculate reciprocal space mapping"""
-        if not self.hkl_data or not self.q_conv:
+            angles = [*sample_circle_positions, *det_circle_positions]
+            return hxrd.Ang2Q.area(*angles, UB=ub_matrix)
+        except Exception as e:
+            with open("error_output1.txt", "w") as f:
+                f.write(str(e))
             return None, None, None
+
+    def attributes_diff(self, hkl_attr: dict, old_attr: dict) -> bool:
+        # Return True as soon as any value differs from the previous frame,
+        # False only after every key has been compared
+        for key, value in hkl_attr.items():
+            if isinstance(value, np.ndarray):
+                if not np.array_equal(value, old_attr[key]):
+                    return True
+            elif old_attr[key] != hkl_attr[key]:
+                return True
+        return False
+
+    def decompress_image(self, pvObject):
+        """Return image pixels as a NumPy array, handling compressed (lz4) and uncompressed payloads.
+        Kept intentionally simple: no dict.get usage on PvObject, no reshaping.
+        """
+        codec_name = pvObject['codec']['name']
+        if codec_name == 'lz4':
+            # Extract compressed bytes from UBYTE union branch
+            u8_pv = pvObject['value'][0]['ubyteValue']
+            u8_list = u8_pv.get() if hasattr(u8_pv, 'get') else u8_pv
+            comp_bytes = np.asarray(u8_list, dtype=np.uint8).tobytes()
+            # Decompress using explicit uncompressed size (store_size=False was used)
+            out_bytes = lz4.block.decompress(comp_bytes, uncompressed_size=pvObject['uncompressedSize'])
+            # Decode original dtype from codec.parameters
+            params = pvObject['codec']['parameters']
+            enum = params[0]['value'] if (isinstance(params, tuple) and len(params) > 0) else pva.UBYTE
+            dtype = self.PVA_TO_NUMPY_DTYPE_MAP.get(enum, np.uint8)
+            # DEBUG
+            # uncompressed_size = pvObject['uncompressedSize']
+            # compressed_size = len(comp_bytes)
+            # msg = f'Decompressing [lz4]: Compressed Size: {compressed_size}, Uncompressed Size: {uncompressed_size}'*10
+            # print(msg)
+            return np.frombuffer(out_bytes, dtype=dtype)
+        # Non-compressed path: convert the active union field to NumPy
+        union_dict = pvObject['value'][0]
+        field_name = next(iter(union_dict))
+        pv_arr = union_dict[field_name]
+        data_list = pv_arr.get() if hasattr(pv_arr, 'get') else pv_arr
+        dtype = self.UNION_FIELD_TO_DTYPE.get(field_name, None)
+        # msg = f"Handling [uncompressed]: Field: {field_name}, Array Length: {len(data_list)} Compressed Size" *10
+        # print(msg)
+        return np.asarray(data_list, dtype=dtype) if dtype is not None else np.asarray(data_list)
+
+
+    def compress_array(self, hkl_array: np.ndarray, codec_name: str) -> np.ndarray:
+        if not isinstance(hkl_array, np.ndarray):
+            raise TypeError("hkl_array must be a numpy array")
+        if hkl_array.ndim != 1:
+            raise ValueError("hkl_array must be a 1D numpy array")
+        byte_data = hkl_array.tobytes()
+        typesize = hkl_array.dtype.itemsize
 
-        hxrd = xu.HXRD(self.inplane_beam_direction,
-                       self.sample_surface_normal_direction,
-                       en=self.energy,
-                       qconv=self.q_conv)
-
-        # Set up detector parameters
-        roi = [0, shape[0], 0, shape[1]]
-        pixel_dir1 = self.hkl_data['DetectorSetup']['PixelDirection1']
-        pixel_dir2 = self.hkl_data['DetectorSetup']['PixelDirection2']
-        cch1 = self.hkl_data['DetectorSetup']['CenterChannelPixel'][0]
-        cch2 = self.hkl_data['DetectorSetup']['CenterChannelPixel'][1]
-        nch1 = shape[0]
-        nch2 = shape[1]
-        pixel_width1 = self.hkl_data['DetectorSetup']['Size'][0] / nch1
-        pixel_width2 = self.hkl_data['DetectorSetup']['Size'][1] / nch2
-        distance = self.hkl_data['DetectorSetup']['Distance']
-
-        hxrd.Ang2Q.init_area(
-            pixel_dir1, pixel_dir2,
-            cch1=cch1, cch2=cch2,
-            Nch1=nch1, Nch2=nch2,
-            pwidth1=pixel_width1,
-            pwidth2=pixel_width2,
-            distance=distance,
-            roi=roi
-        )
-
-        angles = [*self.sample_circle_positions, *self.det_circle_positions]
-        return hxrd.Ang2Q.area(*angles, UB=self.ub_matrix)
+        if codec_name == 'lz4':
+            compressed = lz4.block.compress(byte_data, store_size=False)
+        elif codec_name == 'bslz4':
+            compressed = bitshuffle.compress_lz4(hkl_array)
+        elif codec_name == 'blosc':
+            compressed = blosc2.compress(
+                byte_data,
+                typesize=typesize
+            )
+        else:
+            raise ValueError(f"Unsupported codec: {codec_name}")
+
+        # Convert compressed bytes to a uint8 numpy array
+        return np.frombuffer(compressed, dtype=np.uint8)
 
     def process(self, pvObject):
         t0 = time.time()
+
+        dims = pvObject['dimension']
+        nDims = len(dims)
+        if not nDims:
+            # Frame has no image data
+            return pvObject
+
+        if 'timeStamp' not in pvObject:
+            # No timestamp, just return the object
+            return pvObject
+
+        if 'attribute' not in pvObject:
+            print('attributes not in pvObject')
+            return pvObject
+        # Optionally decode image data for local use, but do not modify pvObject['value']
+        _ = self.decompress_image(pvObject)
+
+        self.hkl_attributes = self.parse_hkl_ndattributes(pvObject)
+        self.shape = tuple([dim['size'] for dim in dims])
+
+        if self.old_attributes is not None:
+            attributes_diff = self.attributes_diff(self.hkl_attributes, self.old_attributes)
+        else:
+            attributes_diff = True
+        self.old_attributes = copy.deepcopy(self.hkl_attributes)
+
+        if attributes_diff:
+            # Only recalculate qxyz if there are new attributes
+            qxyz = self.create_rsm(self.hkl_attributes, self.shape)
+            self.qx: np.ndarray = np.ravel(qxyz[0])
+            self.qy: np.ndarray = np.ravel(qxyz[1])
+            self.qz: np.ndarray = np.ravel(qxyz[2])
+            self.codec_name = pvObject['codec']['name']
+            self.original_dtype = self.qx.dtype if self.qx.dtype == self.qy.dtype == self.qz.dtype else np.dtype('float64')
+            self.codec_parameters = int(self.CODEC_PARAMETERS_MAP.get(self.original_dtype, None)) if self.codec_name else -1
+            self.uncompressed_size = self.qx.nbytes if self.qx.nbytes == self.qy.nbytes == self.qz.nbytes else np.prod(self.shape) * self.original_dtype.itemsize
+            self.compressed_size_qx = self.uncompressed_size
+            self.compressed_size_qy = self.uncompressed_size
+            self.compressed_size_qz = self.uncompressed_size
+
+            if self.codec_name != '':
+                self.qx = self.compress_array(self.qx, self.codec_name)
+                self.qy = self.compress_array(self.qy, self.codec_name)
+                self.qz = self.compress_array(self.qz, self.codec_name)
+                self.compressed_size_qx = self.qx.shape[0]
+                self.compressed_size_qy = self.qy.shape[0]
+                self.compressed_size_qz = self.qz.shape[0]
+
         try:
-            # Get frame dimensions
-            if 'dimension' in pvObject:
-                dims = pvObject['dimension']
-                self.shape = tuple([dim['size'] for dim in dims])
 
-            # Calculate RSM
-            qx, qy, qz = self.create_rsm(self.shape)
-
-            if qx is not None:
-                # Create RSM data structure
-                rsm_data = {
-                    'qx': qx.tolist(),
-                    'qy': qy.tolist(),
-                    'qz': qz.tolist()
-                }
-
-                # Create attribute
-                rsm_attribute = {
-                    'name': 'RSM',
-                    'value': [{'value': rsm_data}]
-                }
-
-                # Get or create attribute list
-                if 'attribute' not in pvObject:
-                    pvObject['attribute'] = []
-
-                # Add RSM attribute
-                pvObject['attribute'].append(rsm_attribute)
-
-                self.nFramesProcessed += 1
-            else:
-                self.nFrameErrors += 1
+            # Create RSM data structure
+            rsm_data = {
+                'codec':{
+                    'name': self.codec_name,
+                    'parameters': self.codec_parameters},
+                'qx': {
+                    'compressedSize': int(self.compressed_size_qx),
+                    'uncompressedSize': int(self.uncompressed_size),
+                    'value':self.qx},
+                'qy': {
+                    'compressedSize': int(self.compressed_size_qy),
+                    'uncompressedSize': int(self.uncompressed_size),
+                    'value':self.qy},
+                'qz': {
+                    'compressedSize': int(self.compressed_size_qz),
+                    'uncompressedSize': int(self.uncompressed_size),
+                    'value':self.qz},
+            }
 
+            # Create PV object to hold RSM attributes
+            if self.codec_name != '':
+                rsm_object = {'name': 'RSM', 'value': PvObject({'value': self.type_dict_compressed}, {'value': rsm_data})}
+            else:
+                rsm_object = {'name': 'RSM', 'value': PvObject({'value': self.type_dict}, {'value': rsm_data})}
+
+            # append the new attributes
+            frameAttributes = pvObject['attribute']
+            frameAttributes.append(rsm_object)
+            #pvObject['attribute'] = frameAttributes
+            self.nFramesProcessed += 1
+
+            # Update stats
+            frameTimestamp = TimeUtility.getTimeStampAsFloat(pvObject['timeStamp'])
+            self.lastFrameTimestamp = frameTimestamp
+
+            proc_time_start = pva.PvObject({'value': pva.DOUBLE})
+            proc_time_start['value'] = t0 # seconds, or multiply by 1000.0 for ms
+            frameAttributes.append({
+                'name': f'procTimeStart_{self.__class__.__name__}',
+                'value': proc_time_start
+            })
+            proc_time_end = pva.PvObject({'value': pva.DOUBLE})
+            proc_time_end['value'] = time.time() # seconds, or multiply by 1000.0 for ms
+            frameAttributes.append({
+                'name': f'procTimeEnd_{self.__class__.__name__}',
+                'value': proc_time_end
+            })
+            proc_time = pva.PvObject({'value': pva.DOUBLE})
+            proc_time['value'] = (time.time() - t0) # seconds, or multiply by 1000.0 for ms
+            frameAttributes.append({
+                'name': f'procTime_{self.__class__.__name__}',
+                'value': proc_time
+            })
+
+            pvObject['attribute'] = frameAttributes
 
-            # Update output channel
             self.updateOutputChannel(pvObject)
-
+
+            # Update processing time
+            t1 = time.time()
+            self.processingTime += (t1 - t0)
+
+            return pvObject
+
         except Exception as e:
-            self.logger.error(f"Error processing frame: {str(e)}")
             self.nFrameErrors += 1
-
-        t1 = time.time()
-        self.processingTime += (t1 - t0)
-
-        return pvObject
+            with open("error_output2.txt", "w") as f:
+                f.writelines([''.join(traceback.format_exception(None, e, e.__traceback__))])
+            return pvObject
 
     def resetStats(self):
-        self.nFramesProcessed = 0
-        self.nFrameErrors = 0
+        """
+        Reset processor statistics.
+        """
+        self.nFramesProcessed = 0
+        self.nFrameErrors = 0
+        self.nMetadataProcessed = 0
+        self.nMetadataDiscarded = 0
         self.processingTime = 0
 
     def getStats(self):
+        """
+        Get current statistics of processing.
+        """
         processedFrameRate = 0
         frameErrorRate = 0
         if self.processingTime > 0:
             processedFrameRate = self.nFramesProcessed / self.processingTime
             frameErrorRate = self.nFrameErrors / self.processingTime
-        return {
-            'nFramesProcessed': self.nFramesProcessed,
-            'nFrameErrors': self.nFrameErrors,
-            'processingTime': FloatWithUnits(self.processingTime, 's'),
-            'processedFrameRate': FloatWithUnits(processedFrameRate, 'fps'),
-            'frameErrorRate': FloatWithUnits(frameErrorRate, 'fps')
+        return {
+            'nFramesProcessed' : self.nFramesProcessed,
+            'nFrameErrors' : self.nFrameErrors,
+            'nMetadataProcessed' : self.nMetadataProcessed,
+            'nMetadataDiscarded' : self.nMetadataDiscarded,
+            'processingTime' : FloatWithUnits(self.processingTime, 's'),
+            'processedFrameRate' : FloatWithUnits(processedFrameRate, 'fps'),
+            'frameErrorRate' : FloatWithUnits(frameErrorRate, 'fps')
         }
 
     def getStatsPvaTypes(self):
-        return {
-            'nFramesProcessed': pva.UINT,
-            'nFrameErrors': pva.UINT,
-            'processingTime': pva.DOUBLE,
-            'processedFrameRate': pva.DOUBLE,
-            'frameErrorRate': pva.DOUBLE
-        }
\ No newline at end of file
+        """
+        Define PVA types for different stats variables.
+        """
+        return {
+            'nFramesProcessed' : pva.UINT,
+            'nFrameErrors' : pva.UINT,
+            'nMetadataProcessed' : pva.UINT,
+            'nMetadataDiscarded' : pva.UINT,
+            'processingTime' : pva.DOUBLE,
+            'processedFrameRate' : pva.DOUBLE,
+            'frameErrorRate' : pva.DOUBLE
+        }
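Downstream clients (e.g. the HKL viewer) have to undo this packing. A minimal sketch of reading one q-axis back out of the `RSM` attribute dictionary, assuming the lz4 codec path and the float64 arrays declared in `type_dict` (the helper name here is illustrative, not part of the patch):

```python
import numpy as np
import lz4.block

def unpack_q_axis(rsm_value: dict, axis: str = 'qx') -> np.ndarray:
    """Recover one q-axis array from the RSM attribute built by HpcRsmProcessor."""
    entry = rsm_value[axis]
    if rsm_value['codec']['name'] == 'lz4':
        # Compressed path: uint8 payload plus the recorded uncompressed size
        comp = np.asarray(entry['value'], dtype=np.uint8).tobytes()
        raw = lz4.block.decompress(comp, uncompressed_size=entry['uncompressedSize'])
        return np.frombuffer(raw, dtype=np.float64)
    # Uncompressed path: values are already DOUBLEs
    return np.asarray(entry['value'], dtype=np.float64)
```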
diff --git a/consumers/hpc_spontaneous_analysis_consumer.py b/consumers/hpc_spontaneous_analysis_consumer.py
index 3d14a5b..1b5a88b 100755
--- a/consumers/hpc_spontaneous_analysis_consumer.py
+++ b/consumers/hpc_spontaneous_analysis_consumer.py
@@ -76,11 +76,6 @@ def parse_pva_ndattributes(self, pva_object):
             value = attr['value']
             attributes[name] = value
 
-        # Include additional values commonly found at top-level for completeness.
-        for value_key in ["codec", "uniqueId", "uncompressedSize"]:
-            if value_key in pva_object:
-                attributes[value_key] = pva_object[value_key]
-
         self.attributes = attributes
 
     def pva_to_image(self, pva_object):
@@ -169,12 +164,13 @@ def process(self, pvObject):
         # Now create a PvObject with the analysis results
         # We will send out a single data point (X, Y, Intensity, ComX, ComY)
         analysis_object = PvObject({'value':{'Axis1': DOUBLE, 'Axis2': DOUBLE,
-                                             'Intensity': DOUBLE, 'ComX': DOUBLE, 'ComY': DOUBLE}},
-                                   {'value':{'Axis1': float(x_value),
-                                             'Axis2': float(y_value),
-                                             'Intensity': float(intensity),
-                                             'ComX': float(com_x),
-                                             'ComY': float(com_y)}})
+                                             'Intensity': DOUBLE,
+                                             'ComX': DOUBLE,
+                                             'ComY': DOUBLE}},
+                                   {'value':{'Axis1': float(x_value),'Axis2': float(y_value),
+                                             'Intensity': float(intensity),
+                                             'ComX': float(com_x),
+                                             'ComY': float(com_y)}})
 
         # Create an NtAttribute to hold this analysis data
         pvAttr = pva.NtAttribute('Analysis', analysis_object)
diff --git a/consumers/pv_monitor_from_collector.py b/consumers/pv_monitor_from_collector.py
index 93f6991..40ef63c 100755
--- a/consumers/pv_monitor_from_collector.py
+++ b/consumers/pv_monitor_from_collector.py
@@ -6,7 +6,6 @@
 last_print_time = time.time()
 total_intensities = []
-metadata0_list = []
 
 import matplotlib.pyplot as plt
@@ -21,10 +20,12 @@ def monitor_callback(data):
     print("\nReceived data:")
     # print(data)
+    print(data['uniqueId'])
     # image_data = data['value'][0]['ubyteValue']
     # image_data = data['value'][0]['uintValue']
-    print(data.get())
-    image_data = data['value'][0]['ushortValue']
+    # print(data.get())
+    # print(data.getIntrospectionDict())
+    # image_data = data['value'][0]['ushortValue']
     # print(data.has_key('uncompressedSize'))
     # print(data['compressedSize'], data['uncompressedSize'])
     # print(data.get())
@@ -35,21 +36,55 @@ def monitor_callback(data):
     # print(f"Image data length: {len(image_data)}")
 
-    metadata = {}
-    if 'attribute' in data:
-        attributes = data['attribute']
-        for attr in attributes:
-            name = attr['name']
-            value = attr['value']
-            metadata[name] = value
-    metadata["uniqueId"] = data["uniqueId"]
-
-    print("\nMetadata:")
-    for channel, value in metadata.items():
-        print(f"{channel}: {value}")
-        if channel == "processor:1:analysis":
-            print(f"appending {value[0]['value']}")
-            metadata0_list.append(value[0]['value'])
+    # metadata = {}
+    # if 'attribute' in data:
+    #     attributes = data['attribute']
+    #     for attr in attributes:
+    #         name = attr['name']
+    #         value = attr['value'][0]['value']
+    #         metadata[name] = value
+
+    # print("\nMetadata:")
+    # for channel, value in metadata.items():
+    #     print(f"{channel} = {value}")
+    # print(f"attributes diff = {metadata['RSM']['attributes_diff']}")
+
+
+    # if previous_data is not None:
+    #     if len(previous_data) != len(metadata):
+    #         dicts_equal = False
+    #     else:
+    #         for key, value in metadata.items():
+    #             if key not in previous_data:
+    #                 dicts_equal = False
+    #                 print(key)
+    #                 break
+    #             if isinstance(value, np.ndarray):
+    #                 arrs_equal = np.array_equal(value, previous_data[key])
+    #                 if not arrs_equal:
+    #                     dicts_equal = False
+    #                     print(key)
+    #                     break
+    #             elif previous_data[key] != metadata[key]:
+    #                 dicts_equal = False
+    #                 print(key)
+    #                 break
+    #         else:
+    #             dicts_equal = True
+
+    #     print(f'Current equals Previous: {dicts_equal}')
+
+    # previous_data = metadata
+    # metadata["uniqueId"] = data["uniqueId"]
+
+
+    # print([metadata[f'PrimaryBeamDirection:AxisNumber{i}'] for i in range(1,4)])
+    # primary_beam_directions = [metadata.get(f'PrimaryBeamDirection:AxisNumber{i}', None) for i in range(1,4)]
range(1,4)] + # inplane_beam_direction = [metadata.get(f'PrimaryBeamDirection:AxisNumber{i}', None) for i in range(1,4)] + # sample_surface_normal_direction = [metadata.get(f'SampleSurfaceNormalDirection:AxisNumber{i}', None) for i in range(1,4)] + + # print(primary_beam_directions, inplane_beam_direction, sample_surface_normal_direction) + # if previous_data is not None: # images_equal = (image_data == previous_data).all() @@ -66,16 +101,17 @@ def monitor_callback(data): print('data recorded!') -# collector_channel = pva.Channel('DetectorSetup:Name', pva.CA) -# collector_channel = pva.Channel("processor:1:analysis", pva.PVA) -collector_channel = pva.Channel("pvapy:image", pva.PVA) +# collector_channel = pva.Channel('processor:1:output', pva.PVA) +# collector_channel = pva.Channel("processor:10:analysis", pva.PVA) +collector_channel = pva.Channel("collector:1:output", pva.PVA) +# collector_channel = pva.Channel("collector2:1:output", pva.PVA) + collector_channel.subscribe("monitor", monitor_callback) collector_channel.startMonitor() try: while True: time.sleep(0.1) - # print(f"{total_intensities}, {metadata0_list}") except KeyboardInterrupt: collector_channel.stopMonitor() diff --git a/consumers/sim_rsm_data.py b/consumers/sim_rsm_data.py index a643a3e..6bb9d9f 100644 --- a/consumers/sim_rsm_data.py +++ b/consumers/sim_rsm_data.py @@ -21,7 +21,9 @@ import tempfile import ctypes.util import math +import numpy as np import pvaccess as pva # pva module provides CaIoc and related functions +from epics import camonitor, caget # ------------------------------- # Define PV Record Parameters @@ -39,13 +41,13 @@ "AxisNumber": 2, "SpecMotorName": "Eta", "DirectionAxis": "z-", - "Position": 10.74625, + "Position": 0.0, }, "6idb1:m19.RBV": { "AxisNumber": 3, "SpecMotorName": "Chi", "DirectionAxis": "y+", - "Position": 90.14, + "Position": 0.0 }, "6idb1:m20.RBV": { "AxisNumber": 4, @@ -63,7 +65,7 @@ "AxisNumber": 2, "SpecMotorName": "Delta", "DirectionAxis": "z-", - "Position": 70.035125, + "Position": 0.0, }, } @@ -77,7 +79,7 @@ "Value": [1, 0, 0, 0, 1, 0, 0, 0, 1] } energy_record = { - "Value": 5.212 # keV + "Value": 11.212 # keV } # Additional static records based on HKL configuration: @@ -101,9 +103,18 @@ "PixelDirection2": "x+", "CenterChannelPixel": [500, 500], "Size": [100, 100], - "Distance": 20.644, + "Distance": 400.644, "Units": "mm" } +scan_on_record = { + 'Value': False +} +file_path_record = { + 'Value': '' +} +file_name_record = { + 'Value': '' +} # ------------------------------- # Helper: Convert to Valid EPICS Record Name @@ -138,16 +149,23 @@ def get_record_definition(name, value_type) -> str: field(VAL, "") } """ % name + elif isinstance(value_type, bool): + return """ + record(longout, "%s") { + field(DTYP, "Soft Channel") + } + """ % name elif isinstance(value_type, list) and all(isinstance(x, (int, float)) for x in value_type): # For numeric arrays (like UB_matrix) return """ record(waveform, "%s") { field(DTYP, "Soft Channel") field(FTVL, "DOUBLE") - field(NELM, "64") + field(NELM, "9") } """ % name + def setup_ca_ioc(records_dict) -> pva.CaIoc: """ Sets up a CA IOC using pva.CaIoc to broadcast records with the given names and value types. 
@@ -160,7 +178,7 @@ def setup_ca_ioc(records_dict) -> pva.CaIoc: epicsLibDir = os.path.dirname(pvDataLib) dbdDir = os.path.realpath('%s/../../dbd' % epicsLibDir) os.environ['EPICS_DB_INCLUDE_PATH'] = dbdDir - + # Create a temporary database file dbFile = tempfile.NamedTemporaryFile(delete=False, mode='w') @@ -197,9 +215,11 @@ def update_ca_record_field(caIoc, base_name, field_name, value) -> None: record_name = "%s:%s" % (valid_base, field_name) try: - # print(record_name, value) - if isinstance(value, list) and all(isinstance(x, (int, float)) for x in value): + if isinstance(value, bool): + value = int(value) # Cast True/False to 1/0 + elif isinstance(value, (list, np.ndarray)) and all(isinstance(x, (int, float, np.float32)) for x in value): # For numeric arrays + value = [int(val) for val in value] caIoc.putField(record_name, value) else: # # For scalar values or other types @@ -227,29 +247,24 @@ def main() -> None: "PrimaryBeamDirection": primary_beam_direction_record, "InplaneReferenceDirection": inplane_reference_direction_record, "SampleSurfaceNormalDirection": sample_surface_normal_direction_record, - "DetectorSetup": detector_setup_record + "DetectorSetup": detector_setup_record, + "ScanOn": scan_on_record, + "FilePath": file_path_record, + "FileName": file_name_record } - print(all_records) + print(axis_records) # Set up the CA IOC with these records caIoc = setup_ca_ioc(all_records) - - # Add Name field to all static records - static_records = { - "6idb:spec:UB_matrix": {**ub_matrix_record, "Name": "6idb:spec:UB_matrix"}, - "6idb:spec:Energy": {**energy_record, "Name": "6idb:spec:Energy"}, - "PrimaryBeamDirection": {**primary_beam_direction_record, "Name": "PrimaryBeamDirection"}, - "InplaneReferenceDirection": {**inplane_reference_direction_record, "Name": "InplaneReferenceDirection"}, - "SampleSurfaceNormalDirection": {**sample_surface_normal_direction_record, "Name": "SampleSurfaceNormalDirection"}, - "DetectorSetup": {**detector_setup_record, "Name": "DetectorSetup"} - } # Update static records (they remain constant) for rec_name, rec_data in all_records.items(): update_full_record(caIoc, rec_name, rec_data) # For dynamic axis records, store their base positions - base_positions = {name: rec["Position"] for name, rec in axis_records.items()} + base_positions = {name: rec['Position'] for name, rec in axis_records.items()} + dynamic_records = {**axis_records, '6idb:spec:Energy':13.0,} #'6idb:spec:UB_matrix': caget('6idb:spec:UB_matrix')} + amplitude = 0.5 # Amplitude of the sine-wave update update_interval = 0.5 # Seconds between updates start_time = time.time() @@ -257,13 +272,21 @@ try: while True: elapsed = time.time() - start_time - for name, rec in axis_records.items(): + for name, rec in dynamic_records.items(): # Update the Position field with a sine offset - new_position = base_positions[name] + amplitude * math.sin(elapsed) + new_position = 5 + (amplitude * math.sin(elapsed)) # caget(name) #+ amplitude * math.sin(elapsed) # Only update the Eta field For Now - if rec["SpecMotorName"] == "Eta": - # Only Update Eta Position - update_ca_record_field(caIoc, name, "Position", new_position) + if isinstance(rec, dict): + if rec["SpecMotorName"] == "Eta": + # Only Update Eta Position + update_ca_record_field(caIoc, name, 'Position', new_position) + if rec["SpecMotorName"] == "Delta": + # Only Update Delta Position + update_ca_record_field(caIoc, name, 'Position', new_position) + # elif name == '6idb:spec:Energy': + # update_ca_record_field(caIoc, name, 
'Value', new_position) + # elif name == '6idb:spec:UB_matrix': + # update_ca_record_field(caIoc, name, 'Value', new_position) time.sleep(update_interval) except KeyboardInterrupt: diff --git a/dashpva.py b/dashpva.py new file mode 100644 index 0000000..ed3dfa9 --- /dev/null +++ b/dashpva.py @@ -0,0 +1,120 @@ +# Register HDF5 compression filters globally (e.g., blosc/bitshuffle) +# Importing hdf5plugin once enables decompression support for all h5py reads in the app. +try: + import hdf5plugin # noqa: F401 +except Exception: + pass + +import sys +import click +import subprocess + +CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) + +@click.group(context_settings=CONTEXT_SETTINGS) +@click.option('-d', '--detector', 'help_detector', is_flag=True, help='Show help for detector') +@click.option('-r', '--run', 'help_run', is_flag=True, help='Show help for run') +@click.option('-k', '--hkl3d', 'help_hkl3d', is_flag=True, help='Show help for hkl3d') +@click.option('-l', '--slice3d', 'help_slice3d', is_flag=True, help='Show help for slice3d') +@click.option('-S', '--setup', 'help_setup', is_flag=True, help='Show help for setup') +@click.option('-w', '--workbench', 'help_workbench', is_flag=True, help='Show help for workbench') +@click.option('-m', '--monitor', 'help_monitor', is_flag=True, help='Show help for monitor') +@click.pass_context +def cli(ctx, help_detector, help_run, help_hkl3d, help_slice3d, help_setup, help_workbench, help_monitor): + """ + DashPVA: High-Performance X-ray Visualization & Analysis Tool. + + This suite provides real-time monitoring (PVA), 3D Reciprocal Space Mapping (HKL), + and post-processing workbenches to analyze and manipulate your data. + """ + # Handle global help flags that print subcommand help and exit + selected = [name for name, flag in [ + ('detector', help_detector), + ('run', help_run), + ('hkl3d', help_hkl3d), + ('slice3d', help_slice3d), + ('setup', help_setup), + ('workbench', help_workbench), + ('monitor', help_monitor), + ] if flag] + + if len(selected) > 1: + raise click.UsageError('Please pick only one global help flag (e.g., -d/--detector, -r/--run, -k/--hkl3d, -l/--slice3d, -S/--setup, -w/--workbench, -m/--monitor).') + + if len(selected) == 1: + sub_name = selected[0] + sub_cmd = cli.get_command(ctx, sub_name) + if sub_cmd is None: + raise click.UsageError(f'Unknown command: {sub_name}') + sub_ctx = click.Context(sub_cmd, info_name=f"{ctx.info_name} {sub_name}", parent=ctx) + click.echo(sub_cmd.get_help(sub_ctx)) + ctx.exit() + + # Otherwise, proceed normally + +@cli.command() +def run(): + """Open DashPVA launcher menu with process tracking and indicators.""" + click.echo('Opening DashPVA Launcher') + subprocess.run([sys.executable, 'viewer/launcher.py']) + +@cli.command() +def hkl3d(): + """Launch HKL 3D Viewer""" + click.echo('Running HKL 3D Viewer') + subprocess.run([sys.executable, 'viewer/hkl_3d_viewer.py']) + + +@cli.command() +def slice3d(): + """(Standalone Mode) Launch HKL 3D Slicer""" + click.echo('Running HKL 3D Slicer -- Standalone') + subprocess.run([sys.executable, 'viewer/hkl_3d_slice_window.py']) + + +@cli.command() +def detector(): + """Launch Area Detector Viewer""" + click.echo('Running Area Detector Viewer') + subprocess.run([sys.executable, 'viewer/area_det_viewer.py']) + + +@cli.command() +@click.option('--ioc', is_flag=True, help='Also start the simulator IOC in the background before the standard setup.') +def setup(ioc): + """Set up the PVA workflow; with --ioc, also start the simulator IOC in the background.""" + if ioc: + command = [sys.executable, 
'consumers/sim_rsm_data.py'] + click.echo('Running simulator setup...') + subprocess.Popen(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + + click.echo('Running standard PVA setup...') + subprocess.run([sys.executable, 'pva_setup/pva_workflow_setup_dialog.py']) + +@cli.command() +def workbench(): + """Launch Workbench - Data Analysis Tool""" + click.echo('Running Workbench - Data Analysis Tool') + subprocess.run([sys.executable, 'viewer/workbench/workbench.py']) + + +@cli.command() +@click.argument('name', type=click.Choice(['scan', 'scan-monitors'])) +@click.option('--channel', default='', help='PVA channel (optional).') +@click.option('--config', 'config_path', default='', help='Path to TOML config file (optional).') +def monitor(name, channel, config_path): + """Open a specific monitor by name. Supported: scan (alias: scan-monitors).""" + click.echo(f'Opening monitor: {name}') + if name in ('scan', 'scan-monitors'): + command = [sys.executable, 'viewer/scan_view.py'] + else: + raise click.BadParameter(f'Unknown monitor name: {name}') + if config_path: + command.extend(['--config', config_path]) + if channel: + command.extend(['--channel', channel]) + subprocess.run(command) + + +if __name__ == '__main__': + cli() diff --git a/database/__init__.py b/database/__init__.py new file mode 100644 index 0000000..d0d79f0 --- /dev/null +++ b/database/__init__.py @@ -0,0 +1,15 @@ +""" +Database package public API. + +Importing `database` exposes the DatabaseInterface facade, which is the single +public entry point for all database operations (profiles, configs, import/export). +Internal modules (managers, models, migrations) are implementation details. + +Usage: + from database import DatabaseInterface + db = DatabaseInterface() +""" + +from .interface import DatabaseInterface + +__all__ = ["DatabaseInterface"] diff --git a/database/config/interfaces.py b/database/config/interfaces.py new file mode 100644 index 0000000..ee8f23f --- /dev/null +++ b/database/config/interfaces.py @@ -0,0 +1,35 @@ +""" +Interfaces for configuration sources (DB and TOML) used by DashPVA. +""" + +from typing import Dict, Any + + +class ConfigSource: + """ + Abstract interface for a configuration source. + + Implementations: + - DbProfileConfigSource: loads/saves from database profiles + - TomlConfigSource: loads/saves from TOML files + + Attributes: + source_type: Literal "db" or "toml" + """ + + source_type: str = "" + + def load(self) -> Dict[str, Any]: + """ + Load configuration as a TOML-shaped dictionary. + """ + raise NotImplementedError("load() must be implemented by subclasses") + + def save(self, update: Dict[str, Any]) -> bool: + """ + Save the provided configuration dictionary back to the source. + + Returns: + True if successful, False otherwise. + """ + raise NotImplementedError("save() must be implemented by subclasses")
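
A file-backed implementation of this interface could look roughly like the sketch below. It assumes Python 3.11+ for the stdlib `tomllib` reader and the third-party `tomli-w` package for writing; the class body is illustrative only, not the shipped `TomlConfigSource`.

```python
# Sketch of a TOML-backed ConfigSource (illustrative, assumes tomllib/tomli-w).
from typing import Any, Dict

import tomllib  # stdlib read-only TOML parser (Python 3.11+)

from database.config.interfaces import ConfigSource


class TomlConfigSource(ConfigSource):
    source_type = "toml"

    def __init__(self, path: str) -> None:
        self.path = path

    def load(self) -> Dict[str, Any]:
        # Return the file contents as a plain TOML-shaped dict.
        with open(self.path, "rb") as f:
            return tomllib.load(f)

    def save(self, update: Dict[str, Any]) -> bool:
        # tomllib cannot write TOML; defer to the optional tomli-w writer
        # and report failure the way the interface promises (bool, no raise).
        try:
            import tomli_w
            with open(self.path, "wb") as f:
                tomli_w.dump(update, f)
            return True
        except Exception:
            return False
```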
+ """ + raise NotImplementedError("save() must be implemented by subclasses") diff --git a/database/db.py b/database/db.py new file mode 100644 index 0000000..43cf19e --- /dev/null +++ b/database/db.py @@ -0,0 +1,42 @@ +# """ +# SQLAlchemy models for DashPVA profile management +# """ +from sqlalchemy import create_engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker +import os + +Base = declarative_base() + + +# Database configuration +DATABASE_URL = "sqlite:///dashpva.db" + +def get_engine(): + """Create and return database engine""" + return create_engine(DATABASE_URL, echo=False) + +def get_session(): + """Create and return database session""" + engine = get_engine() + Session = sessionmaker(bind=engine) + return Session() + +def create_tables(): + """Create all tables in the database""" + engine = get_engine() + Base.metadata.create_all(engine) + +def init_database(): + """Initialize the database with tables""" + if not os.path.exists("dashpva.db"): + create_tables() + # Seed default settings only on first creation using raw SQL script + try: + from scripts.seed_settings_defaults_sql import seed_defaults + seed_defaults() + except Exception: + pass + print("Database initialized successfully") + else: + print("Database already exists") diff --git a/database/docs/README.md b/database/docs/README.md new file mode 100644 index 0000000..7b5f624 --- /dev/null +++ b/database/docs/README.md @@ -0,0 +1,127 @@ +MODELS +detectors: detector name, pv, desc +profiles: the manager of your program +profile_config: pvs +system: system variables, filepath, etc + +# Profile + a profile is a configuration setting + + +# Profile config +plkace to put your pv'ds and pv values +can nest + +# Detectors + +# System Variables +place to put system variables +store paths +required: LOGS, CONFIG, OUTPUT_PATH, + +## Database Tables + +The DashPVA database consists of several tables that store configuration, system information, and user profiles. Below is a comprehensive list of all tables and their columns: + +# PROFILE & PV CONFIGURATION MANAGEMENT +### profiles +Stores user configuration profiles for different experimental setups. + +| Column | Type | Description | +|--------|------|-------------| +| id | INTEGER PRIMARY KEY | Unique identifier for the profile (auto-increment) | +| name | VARCHAR(255) NOT NULL UNIQUE | Human-readable name of the profile | +| description | TEXT | Optional description of the profile's purpose | +| selected | INTEGER DEFAULT 0 | Whether this profile is currently selected (0=no, 1=yes) | +| default | INTEGER DEFAULT 0 | Whether this is the default profile (0=no, 1=yes) | +| created_at | DATETIME | When the profile was created (UTC timestamp) | +| updated_at | DATETIME | When the profile was last modified (UTC timestamp) | + +### profile_configs +Stores TOML configuration data associated with profiles. 
diff --git a/database/docs/README.md b/database/docs/README.md new file mode 100644 index 0000000..7b5f624 --- /dev/null +++ b/database/docs/README.md @@ -0,0 +1,127 @@ +MODELS +detectors: detector name, PV, description +profiles: the manager of your program's configurations +profile_config: PVs and their values +system: system variables, file paths, etc. + +# Profile +A profile is a configuration setting. + + +# Profile config +Place to put your PVs and PV values; entries can nest. + +# Detectors + +# System Variables +Place to put system variables and store paths. +required: LOGS, CONFIG, OUTPUT_PATH, + +## Database Tables + +The DashPVA database consists of several tables that store configuration, system information, and user profiles. Below is a comprehensive list of all tables and their columns: + +# PROFILE & PV CONFIGURATION MANAGEMENT +### profiles +Stores user configuration profiles for different experimental setups. + +| Column | Type | Description | +|--------|------|-------------| +| id | INTEGER PRIMARY KEY | Unique identifier for the profile (auto-increment) | +| name | VARCHAR(255) NOT NULL UNIQUE | Human-readable name of the profile | +| description | TEXT | Optional description of the profile's purpose | +| selected | INTEGER DEFAULT 0 | Whether this profile is currently selected (0=no, 1=yes) | +| default | INTEGER DEFAULT 0 | Whether this is the default profile (0=no, 1=yes) | +| created_at | DATETIME | When the profile was created (UTC timestamp) | +| updated_at | DATETIME | When the profile was last modified (UTC timestamp) | + +### profile_configs +Stores TOML configuration data associated with profiles. 
+ +| Column | Type | Description | +|--------|------|-------------| +| id | INTEGER PRIMARY KEY | Unique identifier for the config entry (auto-increment) | +| profile_id | INTEGER NOT NULL | Foreign key to profiles table | +| config_type | VARCHAR(50) NOT NULL | Type of config (e.g., 'root', 'CACHE_OPTIONS', 'METADATA') | +| config_section | VARCHAR(100) | Config section name (e.g., 'ALIGNMENT', 'CA', 'ROI1') | +| config_key | VARCHAR(100) NOT NULL | Configuration key (e.g., 'DETECTOR_PREFIX', 'MAX_CACHE_SIZE') | +| config_value | TEXT | The actual configuration value as text | +| created_at | DATETIME | When the config was created (UTC timestamp) | +| updated_at | DATETIME | When the config was last modified (UTC timestamp) | + + +# SETTINGS RELATED TABLES +### settings +Stores hierarchical configuration settings with support for nested organization. + +| Column | Type | Description | +|--------|------|-------------| +| id | INTEGER PRIMARY KEY | Unique identifier for the setting (auto-increment) | +| name | VARCHAR(255) NOT NULL | The setting name/identifier | +| type | VARCHAR(100) NOT NULL | The type of setting (e.g., 'int', 'str', 'list') | +| desc | TEXT | Optional description of what this setting controls; only applies to sections | +| parent_id | INTEGER | Foreign key to parent setting for hierarchical organization | + +### setting_values +Stores key-value pairs associated with settings. + +| Column | Type | Description | +|--------|------|-------------| +| id | INTEGER PRIMARY KEY | Unique identifier for the setting value (auto-increment) | +| setting_id | INTEGER NOT NULL | Foreign key to the settings table | +| key | VARCHAR(255) NOT NULL | The key name for this value | +| value | TEXT NOT NULL | The actual value stored as text (converted from original type) | +| value_type | VARCHAR(20) NOT NULL | Type indicator for the value ('string' or 'int') | + +### system (removed) +The system table has been removed. Use the hierarchical `settings` and `setting_values` tables to store system-wide configuration and paths. + +# DETECTORS RELATED TABLES +### detectors +Stores detector configuration and metadata. 
+ +| Column | Type | Description | +|--------|------|-------------| +| id | INTEGER PRIMARY KEY | Unique identifier for the detector (auto-increment) | +| name | VARCHAR(255) NOT NULL UNIQUE | Name of the detector | +| description | TEXT | Description of the detector | + +### Foreign Key Relationships + +- `profile_configs.profile_id` → `profiles.id` +- `settings.parent_id` → `settings.id` (self-referencing for hierarchical settings) +- `setting_values.setting_id` → `settings.id` + +### Indexes + +The database includes indexes on frequently queried columns: +- `profiles.name` for fast profile lookups (unique constraint) +- `profile_configs.profile_id` for profile-based config queries +- `settings.name` for fast setting lookups +- `settings.parent_id` for hierarchical queries +- `setting_values.setting_id` for value retrieval +- `setting_values.key` for key-based lookups +- `detectors.name` for detector lookups (unique constraint) + +# Paths +The paths setting has a required structure: +APP_META (root) -> Application Metadata +PATHS (root) -> LOG -> logs/ + -> CONFIGS -> configs/ + -> OUTPUTS -> outputs/ + -> SCAN -> scans/ + -> SLICES -> slices/ + -> CONSUMERS -> consumers/ + -> IOC -> ioc/ + +# Getting Started +Want to stop using your TOML file and use something simpler? Great. + +Just run the PVA Workflow Setup -> Configuration -> Import Toml, and that's it.
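
The same TOML import can be scripted through the `DatabaseInterface` facade introduced below; the profile name and output path in this sketch are illustrative:

```python
# Scripted equivalent of "Configuration -> Import Toml" (illustrative names/paths).
from database import DatabaseInterface

db = DatabaseInterface()
prof = db.get_profile_by_name("my_profile") or db.create_profile("my_profile", "Imported from TOML")
db.import_toml_file(prof.id, "pv_configs/metadata_pvs.toml")

# Round-trip back to a TOML file to verify what was stored:
db.export_profile_to_toml_file(prof.id, "output/my_profile_export.toml")
```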
+""" + +from typing import List, Optional, Dict, Any, Union +from database.db import init_database, create_tables +from database.models.profile import Profile, ProfileConfig +from database.managers.profile import ProfileManager +from database.managers.settings import SettingsManager + + +class DatabaseInterface: + """Facade over ProfileManager providing a stable public API.""" + + def __init__(self) -> None: + # Ensure DB file/tables exist + init_database() + # Ensure any new tables are created (e.g., 'settings') + try: + create_tables() + except Exception: + pass + # Internal manager implementation + self._mgr = ProfileManager() + # Settings manager (simple name/type/desc) + self._settings_mgr = SettingsManager() + + # Profiles CRUD + + def create_profile(self, name: str, description: Optional[str] = None) -> Optional[Profile]: + return self._mgr.create_profile(name, description) + + def get_all_profiles(self) -> List[Profile]: + return self._mgr.get_all_profiles() + + def get_profile_by_id(self, profile_id: int) -> Optional[Profile]: + return self._mgr.get_profile_by_id(profile_id) + + def get_profile_by_name(self, name: str) -> Optional[Profile]: + return self._mgr.get_profile_by_name(name) + + def get_selected_profile(self) -> Optional[Profile]: + return self._mgr.get_selected_profile() + + def update_profile_name(self, profile_id: int, new_name: str) -> bool: + return self._mgr.update_profile_name(profile_id, new_name) + + def update_profile_description(self, profile_id: int, description: str) -> bool: + return self._mgr.update_profile_description(profile_id, description) + + def delete_profile(self, profile_id: int) -> bool: + return self._mgr.delete_profile(profile_id) + + # Detector CRUD + # + + # System CRUD + + # Selected / Default flags + + def set_selected_profile(self, profile_id: int) -> bool: + return self._mgr.set_selected_profile(profile_id) + + def clear_selected_profiles(self) -> bool: + return self._mgr.clear_selected_profiles() + + def get_selected_profile(self) -> Optional[Profile]: + return self._mgr.get_selected_profile() + + def set_default_profile(self, profile_id: int) -> bool: + return self._mgr.set_default_profile(profile_id) + + def unset_default_profile(self, profile_id: int) -> bool: + return self._mgr.unset_default_profile(profile_id) + + def get_default_profile(self) -> Optional[Profile]: + return self._mgr.get_default_profile() + + def any_default_exists(self) -> bool: + return self._mgr.any_default_exists() + + def profile_exists(self, name: str) -> bool: + return self._mgr.profile_exists(name) + + # Configuration entries + + def add_profile_config( + self, + profile_id: int, + config_type: str, + config_key: str, + config_value: str, + config_section: Optional[str] = None, + ) -> bool: + return self._mgr.add_profile_config(profile_id, config_type, config_key, config_value, config_section) + + def get_profile_configs(self, profile_id: int, config_type: Optional[str] = None) -> List[ProfileConfig]: + return self._mgr.get_profile_configs(profile_id, config_type) + + def clear_profile_configs(self, profile_id: int) -> bool: + return self._mgr.clear_profile_configs(profile_id) + + def update_config_value(self, config_id: int, new_value: str) -> bool: + return self._mgr.update_config_value(config_id, new_value) + + def delete_config_entry(self, config_id: int) -> bool: + return self._mgr.delete_config_entry(config_id) + + def rename_config_type(self, profile_id: int, old_type: str, new_type: str) -> bool: + return self._mgr.rename_config_type(profile_id, 
old_type, new_type) + + # Import / Export TOML + + def import_toml_to_profile(self, profile_id: int, toml_data: Dict[str, Any]) -> bool: + return self._mgr.import_toml_to_profile(profile_id, toml_data) + + def import_toml_file(self, profile_id: int, toml_file_path: str) -> bool: + return self._mgr.import_toml_file(profile_id, toml_file_path) + + def export_profile_to_toml(self, profile_id: int) -> Dict[str, Any]: + return self._mgr.export_profile_to_toml(profile_id) + + def export_profile_to_toml_file(self, profile_id: int, output_path: str) -> bool: + return self._mgr.export_profile_to_toml_file(profile_id, output_path) + + # Defaults / Seeding + + def ensure_shipped_default_profile(self, toml_file_path: str, name: str = "device:metadata:default") -> Optional[Profile]: + return self._mgr.ensure_shipped_default_profile(toml_file_path, name) + + def seed_system_defaults_from_toml(self, toml_file_path: str, name: str = "device:metadata:default") -> bool: + """ + Convenience wrapper used by UI setup to ensure a default profile seeded from TOML. + + Returns True if the default profile exists or was created successfully. + """ + prof = self.ensure_shipped_default_profile(toml_file_path, name=name) + return prof is not None + + # Utilities + + def clone_profile_configs(self, source_profile_id: int, dest_profile_id: int) -> bool: + return self._mgr.clone_profile_configs(source_profile_id, dest_profile_id) + + # Settings CRUD wrappers with individual setting values + def create_setting(self, name: str, type_: str, desc: Optional[str] = None, parent_id: Optional[int] = None): + return self._settings_mgr.create_setting(name, type_, desc, parent_id) + + def create_child_setting(self, parent_id: int, name: str, type_: str, desc: Optional[str] = None): + return self._settings_mgr.create_child_setting(parent_id, name, type_, desc) + + def get_all_settings(self): + return self._settings_mgr.get_all_settings() + + def get_settings_by_type(self, type_: str): + return self._settings_mgr.get_settings_by_type(type_) + + def get_setting_by_name(self, name: str): + return self._settings_mgr.get_setting_by_name(name) + + def get_setting_by_id(self, id_: int): + return self._settings_mgr.get_setting_by_id(id_) + + def get_distinct_setting_types(self): + return self._settings_mgr.get_distinct_types() + + def update_setting_desc(self, id_: int, desc: str) -> bool: + return self._settings_mgr.update_setting_desc(id_, desc) + + def delete_setting(self, id_: int) -> bool: + return self._settings_mgr.delete_setting(id_) + + # Setting Value operations + def add_setting_value(self, setting_id: int, key: str, value: Union[str, int]) -> bool: + return self._settings_mgr.add_setting_value(setting_id, key, value) + + def add_setting_value_by_name(self, setting_name: str, key: str, value: Union[str, int]) -> bool: + return self._settings_mgr.add_setting_value_by_name(setting_name, key, value) + + def update_setting_value(self, setting_id: int, key: str, value: Union[str, int]) -> bool: + return self._settings_mgr.update_setting_value(setting_id, key, value) + + def update_setting_value_by_name(self, setting_name: str, key: str, value: Union[str, int]) -> bool: + return self._settings_mgr.update_setting_value_by_name(setting_name, key, value) + + def get_setting_value(self, setting_id: int, key: str) -> Optional[Union[str, int]]: + return self._settings_mgr.get_setting_value(setting_id, key) + + def get_setting_value_by_name(self, setting_name: str, key: str) -> Optional[Union[str, int]]: + return 
self._settings_mgr.get_setting_value_by_name(setting_name, key) + + def remove_setting_value(self, setting_id: int, key: str) -> bool: + return self._settings_mgr.remove_setting_value(setting_id, key) + + def remove_setting_value_by_name(self, setting_name: str, key: str) -> bool: + return self._settings_mgr.remove_setting_value_by_name(setting_name, key) + + def get_all_setting_values(self, setting_id: int) -> Dict[str, Union[str, int]]: + return self._settings_mgr.get_all_setting_values(setting_id) + + def get_all_setting_values_by_name(self, setting_name: str) -> Dict[str, Union[str, int]]: + return self._settings_mgr.get_all_setting_values_by_name(setting_name) + + # Hierarchical settings operations + def get_root_settings(self): + return self._settings_mgr.get_root_settings() + + def get_setting_children(self, parent_id: int): + return self._settings_mgr.get_children(parent_id) + + def get_setting_tree(self): + return self._settings_mgr.get_setting_tree() + + def get_setting_by_path(self, path: List[str]): + return self._settings_mgr.get_setting_by_path(path) + + def move_setting(self, setting_id: int, new_parent_id: Optional[int]) -> bool: + return self._settings_mgr.move_setting(setting_id, new_parent_id) diff --git a/database/managers/settings.py b/database/managers/settings.py new file mode 100644 index 0000000..218a4d0 --- /dev/null +++ b/database/managers/settings.py @@ -0,0 +1,392 @@ +from typing import List, Optional, Union, Dict, Any +from sqlalchemy.orm import Session +from database.db import get_session +from database.models.settings import Settings +from database.models.setting_value import SettingValue + +class SettingsManager: + """ + CRUD operations for the Settings table with individual setting values. + """ + def __init__(self) -> None: + pass + + def _session(self) -> Session: + return get_session() + + # Create + def create_setting(self, name: str, type_: str, desc: Optional[str] = None, parent_id: Optional[int] = None) -> Optional[Settings]: + session = self._session() + try: + # Check for existing setting with same name and parent + existing = session.query(Settings).filter( + Settings.name == name, + Settings.parent_id == parent_id + ).first() + if existing: + return existing + + obj = Settings(name=name, type=type_, desc=desc or "", parent_id=parent_id) + session.add(obj) + session.commit() + session.refresh(obj) + return obj + except Exception: + session.rollback() + return None + finally: + session.close() + + def create_child_setting(self, parent_id: int, name: str, type_: str, desc: Optional[str] = None) -> Optional[Settings]: + """Create a child setting under a parent setting.""" + return self.create_setting(name, type_, desc, parent_id) + + # Read + def get_all_settings(self) -> List[Settings]: + session = self._session() + try: + return session.query(Settings).order_by(Settings.type, Settings.name).all() + finally: + session.close() + + def get_settings_by_type(self, type_: str) -> List[Settings]: + session = self._session() + try: + return ( + session.query(Settings) + .filter(Settings.type == type_) + .order_by(Settings.name) + .all() + ) + finally: + session.close() + + def get_distinct_types(self) -> List[str]: + session = self._session() + try: + rows = session.query(Settings.type).distinct().all() + return sorted([t[0] for t in rows if t and t[0]]) + finally: + session.close() + + def get_setting_by_name(self, name: str) -> Optional[Settings]: + session = self._session() + try: + return session.query(Settings).filter(Settings.name == name).first() + 
finally: + session.close() + + def get_setting_by_id(self, id_: int) -> Optional[Settings]: + session = self._session() + try: + return session.query(Settings).get(id_) + finally: + session.close() + + # Update + def update_setting_desc(self, id_: int, desc: str) -> bool: + session = self._session() + try: + obj = session.query(Settings).get(id_) + if not obj: + return False + obj.desc = desc + session.commit() + return True + except Exception: + session.rollback() + return False + finally: + session.close() + + # Setting Value operations + def add_setting_value(self, setting_id: int, key: str, value: Union[str, int]) -> bool: + """Add a new key-value pair to a setting.""" + session = self._session() + try: + setting = session.query(Settings).get(setting_id) + if not setting: + return False + + setting_value = SettingValue(setting_id=setting_id, key=key) + setting_value.set_value(value) + session.add(setting_value) + session.commit() + return True + except Exception: + session.rollback() + return False + finally: + session.close() + + def add_setting_value_by_name(self, setting_name: str, key: str, value: Union[str, int]) -> bool: + """Add a new key-value pair to a setting by setting name.""" + session = self._session() + try: + setting = session.query(Settings).filter(Settings.name == setting_name).first() + if not setting: + return False + + setting_value = SettingValue(setting_id=setting.id, key=key) + setting_value.set_value(value) + session.add(setting_value) + session.commit() + return True + except Exception: + session.rollback() + return False + finally: + session.close() + + def update_setting_value(self, setting_id: int, key: str, value: Union[str, int]) -> bool: + """Update an existing setting value.""" + session = self._session() + try: + setting_value = session.query(SettingValue).filter( + SettingValue.setting_id == setting_id, + SettingValue.key == key + ).first() + + if not setting_value: + return False + + setting_value.set_value(value) + session.commit() + return True + except Exception: + session.rollback() + return False + finally: + session.close() + + def update_setting_value_by_name(self, setting_name: str, key: str, value: Union[str, int]) -> bool: + """Update an existing setting value by setting name.""" + session = self._session() + try: + setting = session.query(Settings).filter(Settings.name == setting_name).first() + if not setting: + return False + + setting_value = session.query(SettingValue).filter( + SettingValue.setting_id == setting.id, + SettingValue.key == key + ).first() + + if not setting_value: + return False + + setting_value.set_value(value) + session.commit() + return True + except Exception: + session.rollback() + return False + finally: + session.close() + + def get_setting_value(self, setting_id: int, key: str) -> Optional[Union[str, int]]: + """Get a specific setting value.""" + session = self._session() + try: + setting_value = session.query(SettingValue).filter( + SettingValue.setting_id == setting_id, + SettingValue.key == key + ).first() + + if not setting_value: + return None + + return setting_value.get_value() + finally: + session.close() + + def get_setting_value_by_name(self, setting_name: str, key: str) -> Optional[Union[str, int]]: + """Get a specific setting value by setting name.""" + session = self._session() + try: + setting = session.query(Settings).filter(Settings.name == setting_name).first() + if not setting: + return None + + setting_value = session.query(SettingValue).filter( + SettingValue.setting_id == setting.id, + 
SettingValue.key == key + ).first() + + if not setting_value: + return None + + return setting_value.get_value() + finally: + session.close() + + def remove_setting_value(self, setting_id: int, key: str) -> bool: + """Remove a setting value.""" + session = self._session() + try: + setting_value = session.query(SettingValue).filter( + SettingValue.setting_id == setting_id, + SettingValue.key == key + ).first() + + if not setting_value: + return False + + session.delete(setting_value) + session.commit() + return True + except Exception: + session.rollback() + return False + finally: + session.close() + + def remove_setting_value_by_name(self, setting_name: str, key: str) -> bool: + """Remove a setting value by setting name.""" + session = self._session() + try: + setting = session.query(Settings).filter(Settings.name == setting_name).first() + if not setting: + return False + + setting_value = session.query(SettingValue).filter( + SettingValue.setting_id == setting.id, + SettingValue.key == key + ).first() + + if not setting_value: + return False + + session.delete(setting_value) + session.commit() + return True + except Exception: + session.rollback() + return False + finally: + session.close() + + def get_all_setting_values(self, setting_id: int) -> Dict[str, Union[str, int]]: + """Get all values for a setting as a dictionary.""" + session = self._session() + try: + setting = session.query(Settings).get(setting_id) + if not setting: + return {} + return setting.get_all_values() + finally: + session.close() + + def get_all_setting_values_by_name(self, setting_name: str) -> Dict[str, Union[str, int]]: + """Get all values for a setting by name as a dictionary.""" + session = self._session() + try: + setting = session.query(Settings).filter(Settings.name == setting_name).first() + if not setting: + return {} + return setting.get_all_values() + finally: + session.close() + + # Hierarchical operations + def get_root_settings(self) -> List[Settings]: + """Get all root settings (settings with no parent).""" + session = self._session() + try: + return session.query(Settings).filter(Settings.parent_id.is_(None)).order_by(Settings.type, Settings.name).all() + finally: + session.close() + + def get_children(self, parent_id: int) -> List[Settings]: + """Get all direct children of a setting.""" + session = self._session() + try: + return session.query(Settings).filter(Settings.parent_id == parent_id).order_by(Settings.name).all() + finally: + session.close() + + def get_setting_tree(self) -> List[Settings]: + """Get all settings organized as a tree structure.""" + session = self._session() + try: + # Get all settings with their relationships loaded + settings = session.query(Settings).all() + # Return only root settings - children will be accessible via relationships + return [s for s in settings if s.parent_id is None] + finally: + session.close() + + def get_setting_by_path(self, path: List[str]) -> Optional[Settings]: + """Get a setting by its hierarchical path.""" + if not path: + return None + + session = self._session() + try: + current = None + for name in path: + if current is None: + # Looking for root setting + current = session.query(Settings).filter( + Settings.name == name, + Settings.parent_id.is_(None) + ).first() + else: + # Looking for child setting + current = session.query(Settings).filter( + Settings.name == name, + Settings.parent_id == current.id + ).first() + + if current is None: + return None + + return current + finally: + session.close() + + def move_setting(self, setting_id: 
int, new_parent_id: Optional[int]) -> bool: + """Move a setting to a new parent (or make it root if new_parent_id is None).""" + session = self._session() + try: + setting = session.query(Settings).get(setting_id) + if not setting: + return False + + # Check for circular reference + if new_parent_id is not None: + parent = session.query(Settings).get(new_parent_id) + if not parent: + return False + + # Check if new parent is a descendant of this setting + current = parent + while current: + if current.id == setting_id: + return False # Would create circular reference + current = current.parent + + setting.parent_id = new_parent_id + session.commit() + return True + except Exception: + session.rollback() + return False + finally: + session.close() + + # Delete + def delete_setting(self, id_: int) -> bool: + session = self._session() + try: + obj = session.query(Settings).get(id_) + if not obj: + return False + session.delete(obj) + session.commit() + return True + except Exception: + session.rollback() + return False + finally: + session.close() diff --git a/database/models/setting_value.py b/database/models/setting_value.py new file mode 100644 index 0000000..af85f4a --- /dev/null +++ b/database/models/setting_value.py @@ -0,0 +1,44 @@ +from sqlalchemy import Column, Integer, String, Text, ForeignKey +from sqlalchemy.orm import relationship +from database.db import Base +from typing import Union, Optional + +class SettingValue(Base): + __tablename__ = 'setting_values' + + id = Column(Integer, primary_key=True, autoincrement=True) + setting_id = Column(Integer, ForeignKey('settings.id'), nullable=False) + key = Column(String(255), nullable=False) + value = Column(Text, nullable=False) # Store as string, convert as needed + value_type = Column(String(20), nullable=False) # 'string' or 'int' + + # Relationship back to the parent setting + setting = relationship("Settings", back_populates="values") + + def set_value(self, value: Union[str, int]) -> None: + """Set the value and automatically determine the type.""" + if isinstance(value, int): + self.value = str(value) + self.value_type = 'int' + else: + self.value = str(value) + self.value_type = 'string' + + def get_value(self) -> Union[str, int]: + """Get the value with proper type conversion.""" + if self.value_type == 'int': + try: + return int(self.value) + except ValueError: + return self.value + return self.value + + def to_dict(self) -> dict: + """Convert to dictionary representation.""" + return { + 'id': self.id, + 'setting_id': self.setting_id, + 'key': self.key, + 'value': self.get_value(), + 'value_type': self.value_type + } diff --git a/database/models/settings.py b/database/models/settings.py new file mode 100644 index 0000000..ffac1a5 --- /dev/null +++ b/database/models/settings.py @@ -0,0 +1,124 @@ +from sqlalchemy import Column, Integer, String, Text, ForeignKey +from sqlalchemy.orm import relationship +from database.db import Base +from typing import List, Dict, Any, Union, Optional + +class Settings(Base): + __tablename__ = 'settings' + + id = Column(Integer, primary_key=True, autoincrement=True) + name = Column(String(255), nullable=False) + type = Column(String(100), nullable=False) + desc = Column(Text, nullable=True) + parent_id = Column(Integer, ForeignKey('settings.id'), nullable=True) + + # Self-referential relationship for hierarchy + parent = relationship("Settings", remote_side=[id], back_populates="children") + children = relationship("Settings", back_populates="parent", cascade="all, delete-orphan") + + # 
Relationship to setting values + values = relationship("SettingValue", back_populates="setting", cascade="all, delete-orphan") + + def add_value(self, key: str, value: Union[str, int]) -> None: + """Add a new key-value pair to this setting.""" + from database.models.setting_value import SettingValue + setting_value = SettingValue(key=key, setting_id=self.id) + setting_value.set_value(value) + self.values.append(setting_value) + + def get_value(self, key: str) -> Optional[Union[str, int]]: + """Get a specific value by key.""" + for setting_value in self.values: + if setting_value.key == key: + return setting_value.get_value() + return None + + def update_value(self, key: str, value: Union[str, int]) -> bool: + """Update an existing value by key.""" + for setting_value in self.values: + if setting_value.key == key: + setting_value.set_value(value) + return True + return False + + def remove_value(self, key: str) -> bool: + """Remove a value by key.""" + for setting_value in self.values: + if setting_value.key == key: + self.values.remove(setting_value) + return True + return False + + def get_all_values(self) -> Dict[str, Union[str, int]]: + """Get all key-value pairs as a dictionary.""" + return {sv.key: sv.get_value() for sv in self.values} + + def get_values_list(self) -> List[Dict[str, Any]]: + """Get all values as a list of dictionaries.""" + return [sv.to_dict() for sv in self.values] + + def add_child(self, name: str, type_: str, desc: Optional[str] = None) -> 'Settings': + """Add a child setting to this setting.""" + child = Settings(name=name, type=type_, desc=desc, parent_id=self.id) + self.children.append(child) + return child + + def get_path(self) -> List[str]: + """Get the full path from root to this setting.""" + path = [] + current = self + while current: + path.insert(0, current.name) + current = current.parent + return path + + def get_full_path(self) -> str: + """Get the full path as a string separated by '/'.""" + return '/'.join(self.get_path()) + + def is_root(self) -> bool: + """Check if this is a root setting (no parent).""" + return self.parent_id is None + + def is_leaf(self) -> bool: + """Check if this is a leaf setting (has values but no children).""" + return len(self.children) == 0 and len(self.values) > 0 + + def is_container(self) -> bool: + """Check if this is a container setting (has children).""" + return len(self.children) > 0 + + def get_descendants(self) -> List['Settings']: + """Get all descendant settings recursively.""" + descendants = [] + for child in self.children: + descendants.append(child) + descendants.extend(child.get_descendants()) + return descendants + + def to_dict(self) -> Dict[str, Any]: + """Convert the setting to a dictionary representation.""" + return { + 'id': self.id, + 'name': self.name, + 'type': self.type, + 'desc': self.desc, + 'parent_id': self.parent_id, + 'values': self.get_all_values(), + 'children': [child.to_dict() for child in self.children] + } + + def to_tree_dict(self) -> Dict[str, Any]: + """Convert to a nested dictionary structure for tree display.""" + result = { + 'id': self.id, + 'name': self.name, + 'type': self.type, + 'desc': self.desc, + 'values': self.get_all_values() + } + + if self.children: + result['children'] = {child.name: child.to_tree_dict() for child in self.children} + + return result diff --git a/dummy/DUMMY_POINT_DATA.h5 b/dummy/DUMMY_POINT_DATA.h5 new file mode 100644 index 0000000..fde932d Binary files /dev/null and b/dummy/DUMMY_POINT_DATA.h5 differ
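
Together, `Settings` and `SettingValue` act as a small tree-shaped key/value store. A sketch of driving the hierarchy through the `DatabaseInterface` wrappers shown earlier, using the README's PATHS/LOG structure as example data (the type strings 'section' and 'path' and the 'dir' key are illustrative, not seeded values):

```python
# Illustrative only: build part of the PATHS tree via the hierarchical settings API.
from database import DatabaseInterface

db = DatabaseInterface()

# Create a root section and a child node under it.
paths = db.create_setting("PATHS", "section", desc="Required path roots")
log = db.create_child_setting(paths.id, "LOG", "path")

# Attach a key-value pair to the child; stored as text with a type tag.
db.add_setting_value(log.id, "dir", "logs/")

# Look the node back up by its hierarchical path and read the value.
node = db.get_setting_by_path(["PATHS", "LOG"])
print(node.name)                             # -> "LOG"
print(db.get_setting_value(node.id, "dir"))  # -> "logs/"
```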
diff --git a/environment.yml b/environment.yml index 4f1ce12..6b6e1cb 100755 --- a/environment.yml +++ b/environment.yml @@ -2,7 +2,6 @@ name: DashPVA channels: - apsu - conda-forge - - defaults dependencies: - _libgcc_mutex=0.1=conda_forge - _openmp_mutex=4.5=2_gnu @@ -23,11 +22,11 @@ dependencies: - brotli-python=1.1.0=py311hfdbb021_2 - bzip2=1.0.8=h5eee18b_6 - c-ares=1.34.4=hb9d3cd8_0 - - ca-certificates=2025.2.25=h06a4308_0 + - ca-certificates=2025.2.25 - cached-property=1.5.2=hd8ed1ab_1 - cached_property=1.5.2=pyha770c72_1 - cairo=1.18.2=h3394656_1 - - certifi=2025.1.31=py311h06a4308_0 + - certifi=2025.1.31 - cffi=1.17.1=py311hf29c0ef_0 - charset-normalizer=3.4.1=pyhd8ed1ab_0 - click=8.1.8=pyh707e725_0 @@ -313,13 +312,16 @@ dependencies: - xorg-xf86vidmodeproto=2.3.1=hb9d3cd8_1005 - xorg-xproto=7.0.31=hb9d3cd8_1008 - xrayutilities=1.7.10=py311h9f3472d_0 - - xz=5.6.4=h5eee18b_1 + - xz=5.6.4=hbcc6ac9_0 + - xz-gpl-tools=5.6.4=hbcc6ac9_0 + - xz-tools=5.6.4=hb9d3cd8_0 - yarl=1.18.3=py311h2dc5d0c_1 - zeromq=4.3.5=h3b0a872_7 - zipp=3.21.0=pyhd8ed1ab_1 - zlib=1.3.1=hb9d3cd8_2 - zstandard=0.23.0=py311hbc35293_1 - zstd=1.5.6=ha6fb4c9_0 + - superqt=0.6.7=pyhd8ed1ab_0 - pip: - addict==2.4.0 - asttokens==3.0.0 @@ -357,7 +359,9 @@ dependencies: - pygments==2.19.1 - pyquaternion==0.9.9 - pytz==2025.1 + - pyvistaqt==0.11.2 - pyyaml==6.0.2 + - qtpy==2.4.3 - referencing==0.36.2 - rpds-py==0.23.1 - scikit-learn==1.6.1
diff --git a/gui/analysis_processor_setup_dialog.ui b/gui/analysis_processor_setup_dialog.ui deleted file mode 100755 index a9f0c0e..0000000 --- a/gui/analysis_processor_setup_dialog.ui +++ /dev/null @@ -1,279 +0,0 @@
[Qt Designer XML deleted (279 lines): analysis processor setup dialog with input channel and output channel suffix fields, an analysis processor key channel JSON config field with Browse/Create buttons, a server queue size field, and OK/Cancel buttons.]
diff --git a/gui/base_mainwindow.ui b/gui/base_mainwindow.ui new file mode 100644 index 0000000..69ef212 --- /dev/null +++ b/gui/base_mainwindow.ui @@ -0,0 +1,134 @@
[Qt Designer XML added (134 lines): reusable 800x600 "Base Main Window" template with a placeholder content area, a File menu (Open... Ctrl+O, Save Ctrl+S, Open Folder... Ctrl+Shift+O, Exit Ctrl+Q) and a Documentation menu (Open Documentation, F1).]
diff --git a/gui/controls/hkl_controls_dialog.ui b/gui/controls/hkl_controls_dialog.ui new file mode 100644 index 0000000..3dc2d02 --- /dev/null +++ b/gui/controls/hkl_controls_dialog.ui @@ -0,0 +1,477 @@
[Qt Designer XML added (477 lines): "DashPVA --3D Slice -- Controls" dialog. Slice Controls group: translate/rotation step sizes, Reset Slice, orientation presets HK(xy)/KL(yz)/HL(xz)/Custom with a custom normal vector, translation along the normal and ±H/±K/±L, and rotation ± around H/K/L. Camera Controls group: zoom in/out with a configurable zoom step factor, Reset Camera, view presets (HK/KL/HL planes, H±/K±/L± axes, Isometric), and a "View along Slice Normal" slice-aligned view.]
diff --git a/gui/dashpva.ui b/gui/dashpva.ui new file mode 100644 index 0000000..62d62e0 --- /dev/null +++ b/gui/dashpva.ui @@ -0,0 +1,190 @@
[Qt Designer XML added (190 lines): 420x360 "DashPVA Launcher" window with module buttons (HKL 3D Viewer, HKL 3D Slicer, Area Detector Viewer, PVA Workflow Setup, Start caIOC (background), Workbench under a "Post Analysis Tools" heading), a "No modules running" status label, and Settings, Shutdown All (force stop all running modules), and Exit buttons.]
diff --git a/gui/edit_add_config_dialog.ui b/gui/edit_add_config_dialog.ui deleted file mode 100755 index e520004..0000000 --- a/gui/edit_add_config_dialog.ui +++ /dev/null @@ -1,230 +0,0 @@
[Qt Designer XML deleted (230 lines): PV config edit dialog with PV Key/Name and PV Item/Info fields, Add/Delete buttons, a scroll area for entries, and OK/Cancel buttons.]
diff --git a/gui/hkl_3d_slice_window.ui b/gui/hkl_3d_slice_window.ui new file mode 100644 index 0000000..7348d4f --- /dev/null +++ b/gui/hkl_3d_slice_window.ui @@ -0,0 +1,1068 @@
[Qt Designer XML added (1068 lines): 1879x1188 HKL 3D slice main window. Load Data / Use Parent Data controls; an Info panel (low-resolution render time, file path, slice orientation/normal/origin/position, original and current image size, original and current point counts); an Image Resolution panel (original vs. current point size and resolution, auto-adaptive target cells per axis); a Toggle Visibility panel (color map jet/viridis/plasma/magma/hot/PuRd, slice pointer, slice, points, lock slice, min/max intensity, reduction factor None/x2/x4/x8); a Settings button; menus Data, Controls, Tools (View), and Settings; and actions Extract Slice, Load, Save, Controls…, Slice, and Optimize….]
diff --git a/gui/hkl_slice_2d_view.ui b/gui/hkl_slice_2d_view.ui new file mode 100644 index 0000000..31382af --- /dev/null +++ b/gui/hkl_slice_2d_view.ui @@ -0,0 +1,18 @@
[Qt Designer XML added (18 lines): minimal "2D Slice View" widget hosting the 2D slice display.]
diff --git a/gui/hkl_viewer_window.ui b/gui/hkl_viewer_window.ui new file mode 100644 index 0000000..0abe826 --- /dev/null +++ b/gui/hkl_viewer_window.ui @@ -0,0 +1,1195 @@
[Qt Designer XML added (1195 lines): HKL 3D viewer main window with a Live View panel (Start/Stop Live View buttons), a Stats group (frames received, frames missed, max/min pixel value, image data type, set max/min intensity, set min/max opacity), and an Image group (image pixel order C/Fortran, Log Image toggle, Reset Camera). The diff is truncated here.]
0 + + + + Plot Cache + + + + + + + + + 10 + 120 + 321 + 361 + + + + + 0 + + + + + + 0 + 0 + + + + Open Slice 3D Window + + + + + + + + + 20 + 600 + 301 + 82 + + + + + + + + 0 + 0 + + + + + 0 + 80 + + + + Save Cache + + + + + + + + + + + + + + + + 0 + 0 + + + + + 900 + 90 + + + + + 16777215 + 70 + + + + + 12 + + + + Connection + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignTop + + + + + 20 + 30 + 151 + 42 + + + + + QFormLayout::AllNonFixedFieldsGrow + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignTop + + + + + + 0 + 0 + + + + + 60 + 40 + + + + + 175 + 40 + + + + + 12 + + + + Provider: + + + + + + + + 0 + 0 + + + + + 50 + 40 + + + + + 60 + 40 + + + + + 0 + 0 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + N/A + + + Qt::AlignCenter + + + + + + + + + 210 + 30 + 201 + 42 + + + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignTop + + + 10 + + + + + + 0 + 0 + + + + + 0 + 40 + + + + + 16777215 + 40 + + + + + 12 + + + + State: + + + + + + + + 0 + 0 + + + + + 100 + 40 + + + + + 140 + 40 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + Disconnected + + + Qt::AlignCenter + + + + + + + + + 450 + 30 + 531 + 42 + + + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignTop + + + + + + 0 + 0 + + + + + 80 + 40 + + + + + 16675 + 40 + + + + + 12 + + + + PV Input Channel: + + + + + + + + 0 + 0 + + + + + 180 + 40 + + + + + 16776 + 40 + + + + + 12 + + + + + + + + + + + + + + + + + + + diff --git a/gui/imageshow.ui b/gui/imageshow.ui index 5fe5dc5..23a0c06 100755 --- a/gui/imageshow.ui +++ b/gui/imageshow.ui @@ -6,8 +6,8 @@ 0 0 - 1368 - 926 + 1386 + 976 @@ -171,6 +171,18 @@ QPushButton:pressed { 500 + + + 425 + 16777215 + + + + + 1 + 0 + + false @@ -208,1394 +220,1409 @@ QPushButton:pressed { 0 - -120 + 0 335 - 1911 + 2000 - - - - 10 - 0 - 321 - 321 - - - - - 0 - 0 - - - - - 12 - - - - Stats - - - - - 10 - 30 - 302 - 281 - - - - - Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + 0 + 0 + - - Qt::AlignLeading|Qt::AlignLeft|Qt::AlignTop + + + 0 + 325 + - - 8 + + + 12 + - - 12 + + Stats - - - - - 0 - 25 - - - - - 16777215 - 21 - - - - - 12 - - - - Frames Received: - - - - - - - - 0 - 0 - - - - - 0 - 25 - - - - - 150 - 21 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - 0 - - - - - - - - 0 - 25 - - - - - 16777215 - 21 - - - - - 12 - - - - Frames Missed: - - - - - - - - 0 - 0 - - - - - 0 - 25 - - - - - 150 - 21 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - 0 - - - - - - - - 0 - 25 - - - - - 12 - - - - Max [px value]: - - - - - - - - 0 - 0 - - - - - 0 - 25 - - - - - 150 - 16777215 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - 0.0 - - - - - - - - 0 - 25 - - - - - 12 - - - - Min [px value]: - - - - - - - - 0 - 0 - - - - - 0 - 25 - - - - - 150 - 16777215 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - 0.0 - - - - - - - - 0 - 25 - - - - - 12 - - - - Data Type: - - - - - - - - 0 - 0 - - - - - 0 - 25 - - - - - 150 - 16777215 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - none - - - - - - - - 12 - - - - Set Max: - - - - - - - - 0 - 30 - - - - - 150 - 16777215 - - - - - 12 - - - - 9999999999.989999771118164 - - - - - - - - 150 - 16777215 - - - - - 12 - - - - -10000000000.000000000000000 + + + + 10 + 30 + 304 + 281 + + + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter - - 9999999999.989999771118164 + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignTop - - - - - - - 12 - + + 8 - - Set Min: + + 12 - - - - - - - - true - - - - 10 - 330 - 321 - 421 - - - - - 0 - 0 - - - - - 12 - - - - Mouse 
Position - - - false - - - false - - - - - 10 - 30 - 301 - 271 - - - - - QLayout::SetDefaultConstraint + + + + + 0 + 25 + + + + + 16777215 + 21 + + + + + 12 + + + + Frames Received: + + + + + + + + 0 + 0 + + + + + 0 + 25 + + + + + 150 + 21 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + 0 + + + + + + + + 0 + 25 + + + + + 16777215 + 21 + + + + + 12 + + + + Frames Missed: + + + + + + + + 0 + 0 + + + + + 0 + 25 + + + + + 150 + 21 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + 0 + + + + + + + + 0 + 25 + + + + + 12 + + + + Max [px value]: + + + + + + + + 0 + 0 + + + + + 0 + 25 + + + + + 150 + 16777215 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + 0 + 25 + + + + + 12 + + + + Min [px value]: + + + + + + + + 0 + 0 + + + + + 0 + 25 + + + + + 150 + 16777215 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + 0 + 25 + + + + + 12 + + + + Image Data Type: + + + + + + + + 0 + 0 + + + + + 0 + 25 + + + + + 150 + 16777215 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + none + + + + + + + + 12 + + + + Set Max Intensity: + + + + + + + + 0 + 30 + + + + + 150 + 16777215 + + + + + 12 + + + + 9999999999.989999771118164 + + + + + + + + 150 + 16777215 + + + + + 12 + + + + -10000000000.000000000000000 + + + 9999999999.989999771118164 + + + + + + + + 12 + + + + Set Min Intensity: + + + + + + + + + + + true - - Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter + + + 0 + 0 + - - 6 + + + 0 + 311 + - - 12 + + + 12 + - - 0 + + Mouse Position - - 0 + + false - - - - - 30 - 30 - - - - - 0 - 0 - - - - - 12 - - - - false - - - X position: - - - Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - - - - - - - - 0 - 0 - - - - - 0 - 25 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - 0 - - - - - - - - 30 - 30 - - - - - 0 - 0 - - - - - 12 - - - - false - - - Y position: - - - Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - - - - - - - - 0 - 0 - - - - - 0 - 30 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - 0 - - - - - - - - 0 - 30 - - - - - 0 - 0 - - - - - 12 - - - - Pixel Value: - - - Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - - - - - - - - 0 - 0 - - - - - 0 - 25 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - 0.0 - - - - - - - - 0 - 30 - - - - - 0 - 0 - - - - H: - - - - - - - - 0 - 30 - - - - QFrame::Box - - - QFrame::Sunken - - - 0.0 - - - - - - - - 0 - 30 - - - - - 0 - 0 - - - - K: - - - - - - - - 0 - 30 - - - - QFrame::Box - - - QFrame::Sunken - - - 0.0 - - - - - - - - 0 - 30 - - - - - 0 - 0 - - - - L: - - - - - - - - 0 - 30 - - - - QFrame::Box + + false + + + + + 10 + 30 + 301 + 271 + + + + + QLayout::SetDefaultConstraint + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter - - QFrame::Sunken + + 6 - - 0.0 + + 12 - - - - - - - - 9 - 309 - 301 - 101 - - - - - - - - 0 - 0 - + + 0 - - HKL 3D Viewer + + 0 - - - - - - - - - 10 - 760 - 321 - 371 - - - - - 0 - 0 - - - - - 12 - - - - Image - - - - - 10 - 30 - 304 - 331 - - - - - QLayout::SetDefaultConstraint - - - Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter - - - Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter + + + + + 30 + 30 + + + + + 0 + 0 + + + + + 12 + + + + false + + + X position: + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + + + 0 + 0 + + + + + 0 + 25 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + 0 + + + + + + + + 30 + 30 + + + + + 0 + 0 + + + + + 12 + + + + false + + + Y position: + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + + + 0 + 0 + + + + + 0 + 30 + + + + + 12 + + 
+ + QFrame::Box + + + QFrame::Sunken + + + 0 + + + + + + + + 0 + 30 + + + + + 0 + 0 + + + + + 12 + + + + Pixel Value: + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + + + 0 + 0 + + + + + 0 + 25 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + 0 + 30 + + + + + 0 + 0 + + + + H: + + + + + + + + 0 + 30 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + 0 + 30 + + + + + 0 + 0 + + + + K: + + + + + + + + 0 + 30 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + 0 + 30 + + + + + 0 + 0 + + + + L: + + + + + + + + 0 + 30 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + + + + + 0 + 0 + - - 8 + + + 0 + 450 + - - 12 + + + 12 + - - 0 + + Image - - - - - 0 - 25 - - - - - 12 - - - - Plot Call ID: - - - - - - - - 0 - 0 - - - - - 0 - 0 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - 0 - - - - - - - - 28 - 16 - - - - - 12 - - - - Plotting rate (Hz): - - - - - - - - 0 - 0 - - - - - 12 - - - - 1 - - - 999999999 - - - 5 - - - - - - - - 12 - - - - Size X [px]: - - - - - - - true - - - - 0 - 0 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - 0 - - - - - - - - 12 - - - - Size Y [px]: - - - - - - - - 0 - 0 - - - - - 0 - 30 - - - - - 12 - - - - QFrame::Box - - - QFrame::Sunken - - - 0 - - + + + + 10 + 30 + 303 + 411 + + + + + QLayout::SetDefaultConstraint + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter - - - - - - true - - - - 0 - 0 - - - - - 12 - - - - false - - - Log Image - - - true - - - - - - - - 0 - 0 - - - - - 12 - - - - Freeze Image - - - - - - - - 161 - 16777215 - - - - Transpose Image + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter - - - - - - - 0 - 30 - + + 0 - - Show Rois + + 12 - - true + + 0 - - - - - - + + + + + 0 + 25 + + + + + 12 + + + + Plot Call ID: + + + + + + + + 0 + 0 + + + + + 0 + 0 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + 0 + + + + + + + + 28 + 16 + + + + + 12 + + + + Plotting rate (Hz): + + + + + + + + 0 + 0 + + + + + 12 + + + + 1 + + + 999999999 + + + 5 + + + + + + + + 12 + + + + Size X [px]: + + + + + + + true + + + + 0 + 0 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + + + 0 + + + + + + + + 12 + + + + Size Y [px]: + + + + + + + + 0 + 0 + + + + + 0 + 30 + + + + + 12 + + + + QFrame::Box + + + QFrame::Sunken + - C + 0 + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter - - + + + + true + + + + 0 + 0 + + + + + 12 + + + + false + + + Log Image + + + false + + + + + + + + 0 + 0 + + + + + 12 + + - Fortran + Freeze Image + + + + + + + true + + + + 161 + 16777215 + + + + Transpose Image + + + + + + + + 0 + 30 + + + + Show Rois true + + + + + + C + + + + + + + Fortran + + + true + + + + + + + + + true + + + + 140 + 50 + + + + + Sans Serif + 11 + 50 + false + false + + + + + + + Rotate 90° CCW + + + + + + + Image Pixel Order: + + + + + + + Stop HKL + + + + + + + + 0 + 0 + + + + + 140 + 50 + + + + HKL 3D Viewer + + + + + + + + 140 + 50 + + + + Save Cache + + + - - - - - - 0 - 30 - - - - - Sans Serif - 11 - 50 - false - false - - - - QPushButton{ -font: 11pt "Sans Serif"; -border: 1px solid #8f8f91; -border-radius: 3px; -background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, - stop: 0 #f6f7fa, stop: 1 #dadbde); -} -QPushButton::pressed { - background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, - stop: 0 #dadbde, stop: 1 #f6f7fa); -} - - - - Rotate 90° CCW - - - - - - - Image Pixel Order: - - - - - - - - - - 10 - 1140 - 321 - 631 - - - - - 12 - - - - ROI - - - - - 9 - 29 - 301 - 241 - - - - - 
Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + + + 0 + 0 + - - Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter + + + 0 + 630 + - - 20 + + + 12 + - - - - - 12 - - - - color: rgb(255, 0, 0); - - - ROI1 Total: - - - - - - - - 12 - + + ROI + + + + + 9 + 29 + 301 + 241 + + + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - - color: rgb(0, 0, 255); + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter - - ROI2 Total: + + 20 - - - - - - color: rgb(76, 187, 23); + + + + + 12 + + + + color: rgb(255, 0, 0); + + + ROI1 Total: + + + + + + + + 12 + + + + color: rgb(0, 0, 255); + + + ROI2 Total: + + + + + + + color: rgb(76, 187, 23); font: 12pt "Sans Serif"; - - - ROI3 Total: - - - - - - - color: rgb(255, 0, 255); + + + ROI3 Total: + + + + + + + color: rgb(255, 0, 255); font: 12pt "Sans Serif"; - - - ROI4 Total: - - - - - - - Stats5 Total: - - - - - - - - 0 - 30 - - - - QFrame::Box - - - QFrame::Sunken - - - 0.0 - - - - - - - - 0 - 30 - - - - QFrame::Box - - - QFrame::Sunken - - - 0.0 - - - - - - - - 0 - 30 - - - - QFrame::Box - - - QFrame::Sunken - - - 0.0 - - - - - - - - 0 - 30 - - - - QFrame::Box - - - QFrame::Sunken - - - 0.0 - - - - - - - - 0 - 30 - - - - QFrame::Box - - - QFrame::Sunken - - - 0.0 - - - - - - - - - 10 - 280 - 151 - 16 - - - - - 12 - - - - ROI Specific Stats - - - - - - 10 - 300 - 301 - 321 - - - - - - - - 0 - 45 - - - - - Sans Serif - 12 - 50 - false - false - - - - QPushButton{ + + + ROI4 Total: + + + + + + + Stats5 Total: + + + + + + + + 0 + 30 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + 0 + 30 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + 0 + 30 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + 0 + 30 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + 0 + 30 + + + + QFrame::Box + + + QFrame::Sunken + + + 0.0 + + + + + + + + + 10 + 280 + 151 + 16 + + + + + 12 + + + + ROI Specific Stats + + + + + + 10 + 300 + 301 + 321 + + + + + + + + 0 + 45 + + + + + Sans Serif + 12 + 50 + false + false + + + + QPushButton{ font: 12pt "Sans Serif"; border: 1px solid #8f8f91; border-radius: 3px; @@ -1606,22 +1633,22 @@ QPushButton::pressed { background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #dadbde, stop: 1 #f6f7fa); } - - - Stats1 - - - - - - - - 0 - 45 - - - - QPushButton{ + + + Stats1 + + + + + + + + 0 + 45 + + + + QPushButton{ font: 12pt "Sans Serif"; border: 1px solid #8f8f91; border-radius:3px; @@ -1633,22 +1660,22 @@ QPushButton::pressed { stop: 0 #dadbde, stop: 1 #f6f7fa); } - - - Stats2 - - - - - - - - 0 - 45 - - - - QPushButton{ + + + Stats2 + + + + + + + + 0 + 45 + + + + QPushButton{ font: 12pt "Sans Serif"; border: 1px solid #8f8f91; border-radius:3px; @@ -1660,22 +1687,22 @@ QPushButton::pressed { stop: 0 #dadbde, stop: 1 #f6f7fa); } - - - Stats3 - - - - - - - - 0 - 45 - - - - QPushButton{ + + + Stats3 + + + + + + + + 0 + 45 + + + + QPushButton{ font: 12pt "Sans Serif"; border: 1px solid #8f8f91; border-radius:3px; @@ -1686,22 +1713,22 @@ QPushButton::pressed { background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #dadbde, stop: 1 #f6f7fa); } - - - Stats4 - - - - - - - - 0 - 45 - - - - QPushButton{ + + + Stats4 + + + + + + + + 0 + 45 + + + + QPushButton{ font: 12pt "Sans Serif"; border: 1px solid #8f8f91; border-radius:3px; @@ -1712,41 +1739,43 @@ QPushButton::pressed { background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #dadbde, stop: 1 #f6f7fa); } - - - Stats5 - - - - - - - - - - 10 - 1780 - 321 - 121 - - - - Analysis - - - - - 10 - 
30 - 301 - 81 - - - - Open Analysis Window - - - + + + Stats5 + + + + + + + + + + + + 0 + 0 + + + + Analysis + + + + + 10 + 30 + 301 + 81 + + + + Open Analysis Window + + + + + @@ -2045,6 +2074,11 @@ QPushButton::pressed { + + + Save + + diff --git a/gui/pv_config.ui b/gui/pv_config.ui index c6a0314..61faedb 100755 --- a/gui/pv_config.ui +++ b/gui/pv_config.ui @@ -30,7 +30,7 @@ Optional Setup - + 10 @@ -51,7 +51,7 @@ border-radius:3; - /home/beams0/JULIO.RODRIGUEZ/Desktop/Lab Software/area_det_PVA_viewer/pv_configs/metadata_pvs.toml + /home/beams18/USER6IDB/DashPVA/pv_configs/6id_config.toml @@ -83,7 +83,7 @@ QPushButton::pressed { false - + 10 @@ -106,36 +106,10 @@ QPushButton::pressed { } - Create + Clear - - - - 270 - 110 - 80 - 41 - - - - QPushButton{ -font: 11pt "Sans Serif"; -border: 1px solid #8f8f91; -border-radius: 3px; -background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, - stop: 0 #f6f7fa, stop: 1 #dadbde); -} -QPushButton::pressed { - background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, - stop: 0 #dadbde, stop: 1 #f6f7fa); -} - - - Edit - - - + 10 @@ -145,7 +119,7 @@ QPushButton::pressed { - ROI Toml Config + Config TOML @@ -230,9 +204,8 @@ border-radius:3; le_input_channel btn_browse - le_roi_config - btn_edit - btn_create + le_config + btn_clear diff --git a/gui/pva_workflow_setup.ui b/gui/pva_workflow_setup.ui index e294d55..98f5ca3 100755 --- a/gui/pva_workflow_setup.ui +++ b/gui/pva_workflow_setup.ui @@ -150,7 +150,7 @@ 1 - 1000 + 5000 10 @@ -292,6 +292,20 @@ + + + + Current Mode + + + + + + + - + + + @@ -534,7 +548,7 @@ 10000 - 500 + 1000 @@ -699,7 +713,7 @@ - 1,2,3,4 + 1 @@ -755,6 +769,13 @@ + + + + consumers/hpc_rsm_consumer.py + + + @@ -762,10 +783,10 @@ - - + + - consumers/hpc_spontaneous_analysis_consumer.py + Control Channel: @@ -776,6 +797,13 @@ + + + + processor:*:analysis + + + @@ -783,6 +811,20 @@ + + + + processor:*:control + + + + + + + Number of Consumers: + + + @@ -796,6 +838,20 @@ + + + + Output Channel: + + + + + + + processor:*:status + + + @@ -803,10 +859,10 @@ - - + + - HpcAnalysisProcessor + Server Queue Size: @@ -823,59 +879,48 @@ - - - - processor:*:status - - - - - + + - processor:*:analysis + Input Channel: - - + + - Output Channel: + HpcRsmProcessor - - + + - Control Channel: + Report Period: - - + + - processor:*:control + Distributor Updates: - - - - Report Period: + + + + 1 - - - - - - Server Queue Size: + + 5 - - - - Input Channel: + + + + 1 diff --git a/gui/scan_view.ui b/gui/scan_view.ui new file mode 100644 index 0000000..f324337 --- /dev/null +++ b/gui/scan_view.ui @@ -0,0 +1,303 @@ + + ScanViewWindow + + + + 0 + 0 + 500 + 260 + + + + Monitor + + + + + + + Qt::Vertical + + + + 20 + 20 + + + + + + + + scan: off + + + Qt::AlignCenter + + + + + + + + + + Channel: + + + + + + + Enter channel name... + + + + + + + + + + + Config: + + + + + + + Select or enter path to TOML config... + + + + + + + Browse... 
+ + + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Apply + + + + 80 + 30 + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + + Qt::AlignCenter + + + + + + + Monitor Info + + + + + + Caching Mode: + + + + + + + Not set + + + + + + + Flag PV: + + + + + + + Not set + + + + + + + Channel Active: + + + + + + + No + + + + + + + Listening: + + + + + + + False + + + + + + + Is Caching: + + + + + + + No + + + + + + + Time Scanned: + + + + + + + -- + + + + + + + Last Scan Date: + + + + + + + -- + + + + + + + + + + Activity + + + + + + Images collected over time + + + Qt::AlignCenter + + + + + + + + + 200 + 120 + + + + + + + + + + + Qt::Vertical + + + + 20 + 20 + + + + + + + + + + 0 + 0 + 500 + 22 + + + + + + + + diff --git a/gui/settings/settings_dialog.ui b/gui/settings/settings_dialog.ui new file mode 100644 index 0000000..c812b66 --- /dev/null +++ b/gui/settings/settings_dialog.ui @@ -0,0 +1,66 @@ + + + SettingsDialog + + + + 0 + 0 + 360 + 200 + + + + Settings + + + + 12 + + + 12 + + + 12 + + + 12 + + + + + Settings (placeholder) + + + Qt::AlignHCenter|Qt::AlignVCenter + + + font-size: 14px; color: #666; + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + + + QDialogButtonBox::Close|QDialogButtonBox::Ok + + + + + + + + diff --git a/gui/tools/metadata_converter.ui b/gui/tools/metadata_converter.ui new file mode 100644 index 0000000..fcb15c2 --- /dev/null +++ b/gui/tools/metadata_converter.ui @@ -0,0 +1,185 @@ + + + MetadataConverterDialog + + + + 0 + 0 + 640 + 480 + + + + Metadata Converter + + + + + + color: #666; + + + Convert metadata in an HDF5 file or all .h5 files in a folder recursively. Files already in the format are skipped. Defaults: modify files in place, include data/constants, base group entry/data/metadata. Select a TOML mapping file; change options if you don't want the defaults. 
+ + + true + + + + + + + HDF5 Source + + + + + + HDF5 File or Directory: + + + + + + + Select a .h5 file or a directory + + + + + + + Browse File… + + + + + + + Browse Folder… + + + + + + + + + + TOML Mapping + + + + + + TOML File: + + + + + + + Select a mapping .toml file + + + + + + + Browse… + + + + + + + + + + Options + + + + + + Base Group: + + + + + + + entry/data/metadata + + + + + + + Include data and constants (full conversion) + + + true + + + + + + + Modify files in place + + + true + + + + + + + + + + + + Convert + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Close + + + + + + + + + true + + + Conversion log will appear here… + + + + + + + + diff --git a/gui/workbench/docks/information_dock.ui b/gui/workbench/docks/information_dock.ui new file mode 100644 index 0000000..00a47f0 --- /dev/null +++ b/gui/workbench/docks/information_dock.ui @@ -0,0 +1,188 @@ + + + InformationDockWidget + + + Information + + + + QFormLayout::AllNonFixedFieldsGrow + + + + + Total Points: + + + + + + + + + + Total number of data points across the loaded dataset + + + + + + + Axis X: + + + + + + + + + + Label for the X axis variable + + + + + + + Axis Y: + + + + + + + + + + Label for the Y axis variable + + + + + + + Intensity Low: + + + + + + + + + + Minimum intensity across the loaded dataset + + + + + + + Intensity High: + + + + + + + + + + Maximum intensity across the loaded dataset + + + + + + + + + Mouse Position: + + + + + + + + + + Current mouse cursor pixel position (x,y) + + + + + + + Mouse Intensity: + + + + + + + + + + Intensity at the current mouse position + + + + + + + h: + + + + + + + + + + H coordinate at mouse position + + + + + + + k: + + + + + + + + + + K coordinate at mouse position + + + + + + + l: + + + + + + + + + + L coordinate at mouse position + + + + + + + + diff --git a/gui/workbench/docks/slice_plane.ui b/gui/workbench/docks/slice_plane.ui new file mode 100644 index 0000000..9c575aa --- /dev/null +++ b/gui/workbench/docks/slice_plane.ui @@ -0,0 +1,472 @@ + + + SlicePlaneDockWidget + + + Slice Controls + + + + + + Steps + + + + + + Translate step + + + + + + + 5 + + + 0.001000000000000 + + + -1000000.000000000000 + + + 1000000.000000000000 + + + 0.010000000000000 + + + + + + + Rotate step (deg) + + + + + + + 2 + + + 0.500000000000000 + + + -360.000000000000000 + + + 360.000000000000000 + + + 1.000000000000000 + + + + + + + + + + Orientation + + + + + + Preset + + + + + + + + HK (xy) + + + + + KL (yz) + + + + + HL (xz) + + + + + Custom + + + + + + + + Normal H + + + + + + + 6 + + + 0.100000000000000 + + + -1000.000000000000000 + + + 1000.000000000000000 + + + 0.000000000000000 + + + + + + + Normal K + + + + + + + 6 + + + 0.100000000000000 + + + -1000.000000000000000 + + + 1000.000000000000000 + + + 0.000000000000000 + + + + + + + Normal L + + + + + + + 6 + + + 0.100000000000000 + + + -1000.000000000000000 + + + 1000.000000000000000 + + + 1.000000000000000 + + + + + + + + + + Translate + + + + + + Up Normal + + + + + + + Down Normal + + + + + + + +H + + + + + + + -H + + + + + + + +K + + + + + + + -K + + + + + + + +L + + + + + + + -L + + + + + + + + + + Rotate + + + + + + +H + + + + + + + -H + + + + + + + +K + + + + + + + -K + + + + + + + +L + + + + + + + -L + + + + + + + + + + Reset + + + + + + Reset Slice + + + + + + + + + + Visibility + + + + + + Show Slice + + + + + + + true + + + + + + + Show Points + + + + + + + true + + + + + + + + + + Camera + + + + + + Preset + + + + + + + + HK + + + + + KL + + + + + HL + + + + + ISO + + + + + H+ + + + + + H- + + + + + 
K+ + + + + + K- + + + + + L+ + + + + + L- + + + + + + + + Zoom In + + + + + + + Zoom Out + + + + + + + Reset Camera + + + + + + + View Slice Normal + + + + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + + + + diff --git a/gui/workbench/tabs/tab_3d.ui b/gui/workbench/tabs/tab_3d.ui new file mode 100644 index 0000000..58f16c2 --- /dev/null +++ b/gui/workbench/tabs/tab_3d.ui @@ -0,0 +1,194 @@ + + + HKL3DTab + + + + 0 + 0 + + + + + 8 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + + 300 + 16777215 + + + + Qt::ScrollBarAsNeeded + + + Qt::ScrollBarAsNeeded + + + true + + + + + 0 + 0 + 298 + 400 + + + + + + + Load Data + + + + + + Load Selected Dataset + + + + + + + + + + Visibility + + + + + + Color Map: + + + + + + + + jet + + + + + viridis + + + + + plasma + + + + + magma + + + + + + + + Show Slice + + + true + + + + + + + Show Points + + + true + + + + + + + + + + Intensity Range + + + + + + Min: + + + + + + + -999999 + + + 999999 + + + + + + + Max: + + + + + + + -999999 + + + 999999 + + + + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + + + + + + + + diff --git a/gui/workbench/workbench.ui b/gui/workbench/workbench.ui new file mode 100644 index 0000000..037b32a --- /dev/null +++ b/gui/workbench/workbench.ui @@ -0,0 +1,795 @@ + + + Workbench + + + + 0 + 0 + 1600 + 1000 + + + + Workbench - Data Analysis + + + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + Qt::Horizontal + + + + + 160 + 0 + + + + + 8 + + + 12 + + + 12 + + + 6 + + + 12 + + + + + Data Structure + + + + + + true + + + + + + + + + + + + 8 + + + 6 + + + 12 + + + 12 + + + 12 + + + + + font-size: 16px; font-weight: bold; color: #2c3e50; + + + Analysis Workbench + + + Qt::AlignCenter + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Ready + + + color: #27ae60; font-weight: bold; + + + + + + + + + Analysis Workspace + + + + 0 + 1 + + + + + + + 0 + + + + 2D Viewer + + + + + + Image Dimensions: - + + + color: #000000; font-size: 13px; font-weight: bold; + + + Qt::AlignCenter + + + + + + + + 0 + 1 + + + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + + + + 2D Controls + + + 200 + + + + + + + + Color Map: + + + + + + + + viridis + + + + + jet + + + + + plasma + + + + + inferno + + + + + magma + + + + + gray + + + + + hot + + + + + + + + Log Scale + + + + + + + Vmin: + + + + + + + 0 + + + 100 + + + 0 + + + + + + + Vmax: + + + + + + + 10 + + + 100 + + + 100 + + + + + + + Draw ROI + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Auto Levels + + + false + + + + + + + + + + + Ref Frame: + + + + + + + 0 + + + 9999 + + + true + + + QAbstractSpinBox::NoButtons + + + + + + + Other Frame: + + + + + + + 0 + + + 9999 + + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + Frame: + + + + + + + + + + + 30 + 16777215 + + + + false + + + + + + + + 80 + 16777215 + + + + false + + + 0 + + + 0 + + + + + + + + + + + 30 + 16777215 + + + + false + + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + Play + + + + + + + Pause + + + false + + + + + + + FPS: + + + + + + + 1 + + + 60 + + + 2 + + + + + + + Auto Replay + + + + + + + + + + + + + + 1D View + + + + + + + 0 + 1 + + + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + + + + 1D Controls + + + 100 + + + + + + + + 1D view placeholder + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + + + + + + Data Info + + + + + + No HDF5 file loaded + + + font-size: 14px; font-weight: bold; color: #2c3e50; padding: 10px; + + + Qt::AlignCenter + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + + + 150 + + + 0 + + + + Dataset Info + + + + + + true 
+ + + QTextEdit { + background-color: #f8f9fa; + border: none; + padding: 6px; + font-family: 'Consolas', monospace; + font-size: 9pt; +} + + + Select a dataset from the tree to view detailed information. + + + + + + + + File Info + + + + + + true + + + QTextEdit { + background-color: #f8f9fa; + border: none; + padding: 6px; + font-family: 'Consolas', monospace; + font-size: 9pt; +} + + + Load an HDF5 file to view file information. + + + + + + + + + + + + + + + + + + + + + + + + 0 + 0 + 1600 + 22 + + + + + File + + + + + + + + + + + View + + + + + + + Documentation + + + + + + + + + + Open... + + + Ctrl+O + + + + + Save + + + Ctrl+S + + + + + Open Folder... + + + Ctrl+Shift+O + + + + + Collapse All + + + Ctrl+Shift+C + + + + + Expand All + + + Ctrl+Shift+E + + + + + Exit + + + Ctrl+Q + + + + + Open Documentation + + + F1 + + + + + + diff --git a/logs/workbench_3d_debug.txt b/logs/workbench_3d_debug.txt new file mode 100644 index 0000000..26c11f3 --- /dev/null +++ b/logs/workbench_3d_debug.txt @@ -0,0 +1,91 @@ +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after plot_3d_points | actors: ['points', 'vtkScalarBarActor(Addr=0x56021f02ff00)', 'CubeAxesActor(Addr=0x56021ed90330)'] | camera.position=(0.6970561255289582, 0.3418878304376868, 4.483681278506914) focal_point=(0.17264031818802042, -0.18252797690325107, 3.9592654711659767) | cloud_mesh_3d.bounds=BoundsTuple(x_min = 0.021327176144908186, + x_max = 0.32395346023113264, + y_min = -0.327439559358893, + y_max = -0.03761639444760914, + z_min = 3.8526264232706726, + z_max = 4.065904519061281) | scalar_bars: ['intensity'] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after plot_3d_points | actors: ['points', 'vtkScalarBarActor(Addr=0x56312c7f7ed0)', 'CubeAxesActor(Addr=0x56312edaccf0)'] | camera.position=(0.6970561255289582, 0.3418878304376868, 4.483681278506914) focal_point=(0.17264031818802042, -0.18252797690325107, 3.9592654711659767) | cloud_mesh_3d.bounds=BoundsTuple(x_min = 0.021327176144908186, + x_max = 0.32395346023113264, + y_min = -0.327439559358893, + y_max = -0.03761639444760914, + z_min = 3.8526264232706726, + z_max = 4.065904519061281) | scalar_bars: ['intensity'] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | 
camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after plot_3d_points | actors: ['points', 'vtkScalarBarActor(Addr=0x55808c8952e0)', 'CubeAxesActor(Addr=0x55808c985460)'] | camera.position=(0.6970561255289582, 0.3418878304376868, 4.483681278506914) focal_point=(0.17264031818802042, -0.18252797690325107, 3.9592654711659767) | cloud_mesh_3d.bounds=BoundsTuple(x_min = 0.021327176144908186, + x_max = 0.32395346023113264, + y_min = -0.327439559358893, + y_max = -0.03761639444760914, + z_min = 3.8526264232706726, + z_max = 4.065904519061281) | scalar_bars: ['intensity'] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after plot_3d_points | actors: ['points', 'vtkScalarBarActor(Addr=0x5573fae5e810)', 'CubeAxesActor(Addr=0x5573faf4ed20)'] | camera.position=(0.6970561255289582, 0.3418878304376868, 4.483681278506914) focal_point=(0.17264031818802042, -0.18252797690325107, 3.9592654711659767) | cloud_mesh_3d.bounds=BoundsTuple(x_min = 0.021327176144908186, + x_max = 0.32395346023113264, + y_min = -0.327439559358893, + y_max = -0.03761639444760914, + z_min = 3.8526264232706726, + z_max = 4.065904519061281) | scalar_bars: ['intensity'] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after plot_3d_points | actors: ['points', 'vtkScalarBarActor(Addr=0x5560524d20c0)', 'CubeAxesActor(Addr=0x5560525c09c0)'] | camera.position=(0.6970561255289582, 0.3418878304376868, 4.483681278506914) focal_point=(0.17264031818802042, -0.18252797690325107, 3.9592654711659767) | cloud_mesh_3d.bounds=BoundsTuple(x_min = 0.021327176144908186, + x_max = 0.32395346023113264, + y_min = -0.327439559358893, + y_max = -0.03761639444760914, + z_min = 3.8526264232706726, + z_max = 4.065904519061281) | scalar_bars: ['intensity'] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after plot_3d_points | actors: ['points', 'vtkScalarBarActor(Addr=0x55dbb449c120)', 'CubeAxesActor(Addr=0x55dbb467d690)'] | camera.position=(0.6970561255289582, 0.3418878304376868, 4.483681278506914) focal_point=(0.17264031818802042, -0.18252797690325107, 3.9592654711659767) | cloud_mesh_3d.bounds=BoundsTuple(x_min = 0.021327176144908186, + x_max = 0.32395346023113264, + y_min = -0.327439559358893, + y_max = -0.03761639444760914, + z_min = 3.8526264232706726, + z_max = 4.065904519061281) | scalar_bars: ['intensity'] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | 
camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after plot_3d_points | actors: ['points', 'vtkScalarBarActor(Addr=0x55ada2cdee20)', 'CubeAxesActor(Addr=0x55ada2dcea80)'] | camera.position=(0.6970561255289582, 0.3418878304376868, 4.483681278506914) focal_point=(0.17264031818802042, -0.18252797690325107, 3.9592654711659767) | cloud_mesh_3d.bounds=BoundsTuple(x_min = 0.021327176144908186, + x_max = 0.32395346023113264, + y_min = -0.327439559358893, + y_max = -0.03761639444760914, + z_min = 3.8526264232706726, + z_max = 4.065904519061281) | scalar_bars: ['intensity'] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after plot_3d_points | actors: ['points', 'vtkScalarBarActor(Addr=0x563d09f4ace0)', 'CubeAxesActor(Addr=0x563d4b9779d0)'] | camera.position=(0.6970561255289582, 0.3418878304376868, 4.483681278506914) focal_point=(0.17264031818802042, -0.18252797690325107, 3.9592654711659767) | cloud_mesh_3d.bounds=BoundsTuple(x_min = 0.021327176144908186, + x_max = 0.32395346023113264, + y_min = -0.327439559358893, + y_max = -0.03761639444760914, + z_min = 3.8526264232706726, + z_max = 4.065904519061281) | scalar_bars: ['intensity'] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after plot_3d_points | actors: ['points', 'vtkScalarBarActor(Addr=0x55c5ef6fcd80)', 'CubeAxesActor(Addr=0x55c5fd4ce8f0)'] | camera.position=(0.6970561255289582, 0.3418878304376868, 4.483681278506914) focal_point=(0.17264031818802042, -0.18252797690325107, 3.9592654711659767) | cloud_mesh_3d.bounds=BoundsTuple(x_min = 0.021327176144908186, + x_max = 0.32395346023113264, + y_min = -0.327439559358893, + y_max = -0.03761639444760914, + z_min = 3.8526264232706726, + z_max = 4.065904519061281) | scalar_bars: ['intensity'] +[WB3D] STATE after setup_3d_plot_viewer init | actors: [] | camera.position=(1.0, 1.0, 1.0) focal_point=(0.0, 0.0, 0.0) | scalar_bars: [] +[WB3D] STATE after plot_3d_points | actors: ['points', 'vtkScalarBarActor(Addr=0x55c632c002d0)', 'CubeAxesActor(Addr=0x55c67fd4ce50)'] | camera.position=(0.6970561255289582, 0.3418878304376868, 4.483681278506914) focal_point=(0.17264031818802042, -0.18252797690325107, 3.9592654711659767) | cloud_mesh_3d.bounds=BoundsTuple(x_min = 0.021327176144908186, + x_max = 0.32395346023113264, + y_min = -0.327439559358893, + y_max = -0.03761639444760914, + z_min = 3.8526264232706726, 
+ z_max = 4.065904519061281) | scalar_bars: ['intensity'] diff --git a/notebooks/dash_analysis/DashAnalysis_Quickstart.ipynb b/notebooks/dash_analysis/DashAnalysis_Quickstart.ipynb new file mode 100644 index 0000000..d849ca0 --- /dev/null +++ b/notebooks/dash_analysis/DashAnalysis_Quickstart.ipynb @@ -0,0 +1,539 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# DashAnalysis Quickstart — Comprehensive Demo Notebook\n", + "\n", + "This notebook provides a structured, end-to-end walkthrough of the DashAnalysis API with practical, reproducible examples. It expands the original quickstart to cover the full feature set demonstrated in the advanced notebook, including metadata inspection, flexible slicing, advanced visualization, line cuts (static and interactive), volume creation/visualization, and image-based workflows.\n", + "\n", + "Core functions covered:\n", + "\n", + "- da.load_data()\n", + "- data.metadata and da.show_meta()\n", + "- da.show_point_cloud()\n", + "- da.slice_data() with canonical planes, custom normals/origins, axes, slab_thickness, intensity_range\n", + "- da.show_slice() with axis_display, grid, intensity limits, return_image\n", + "- da.line_cut() with presets, custom endpoints, width averaging, interactive mode\n", + "- da.create_vol() and da.show_vol()\n", + "\n", + "Each section includes comments and code you can adapt." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup: imports, notebook backend, and robust DashAnalysis import" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%matplotlib widget\n", + "\n", + "import numpy as np\n", + "import pyvista as pv\n", + "import matplotlib.pyplot as plt\n", + "pv.set_jupyter_backend('html')\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Robust import pattern: use installed module if present; otherwise import from utils\n", + "dash = None\n", + "try:\n", + " import dash_analysis as dash\n", + "except ModuleNotFoundError:\n", + " try:\n", + " import sys\n", + " import os\n", + " notebook_dir = os.getcwd()\n", + " parent_dir = os.path.dirname(os.path.dirname(notebook_dir))\n", + " if parent_dir not in sys.path:\n", + " sys.path.append(parent_dir)\n", + " from utils import dash_analysis as dash\n", + " print(\"Successfully imported dash_analysis from utils\")\n", + " except ImportError as e:\n", + " print(f\"Failed to import dash_analysis: {e}\")\n", + "\n", + "# Create analysis instance\n", + "da = dash.DashAnalysis()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load data (da.load_data) and inspect metadata\n", + "\n", + "We use a known dummy HDF5 file in this repo to ensure reproducibility. If you have a real file, set `filename` accordingly." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Use the repo's dummy data by default; change this to your data file if available\n", + "filename = \"/home/beams/OODIASEIGIEHON/DashPVA/dummy/DUMMY_POINT_DATA.h5\"\n", + "filename = \"/home/beams/USER6IDB/hdf5/raw_data_compressed.h5\"\n", + "\n", + "\n", + "# Load\n", + "# PASS\n", + "data = da.load_data(filename)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Metadata overview (data.metadata and da.show_meta)\n", + "\n", + "You can access a metadata dict on the Data object and also request formatted metadata via `da.show_meta()`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Inspect a specific metadata key (example: grid)\n", + "try:\n", + " print(\"Metadata keys:\", list(data.metadata.keys())[:10])\n", + " print(\"Grid metadata:\", data.metadata.get('grid'))\n", + "except Exception as e:\n", + " print(\"No metadata available on Data object:\", e)\n", + "\n", + "# Show formatted metadata (text or dict)\n", + "_ = da.show_meta(filename, style=\"text\")\n", + "_ = da.show_meta(filename, style=\"dict\")\n", + "# _ = da.show_meta(filename, style=\"json\") # if you prefer JSON-style output\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Point cloud visualization (basic and advanced)\n", + "\n", + "Render the HKL point cloud for a quick look. In notebook mode, rendering automatically caps at 5 million points for interactivity." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Basic point cloud rendering (Data object)\n", + "# Note: pass opacity_range to avoid LUT range issues\n", + "da.show_point_cloud(\n", + " data,\n", + " cmap='viridis',\n", + " point_size=1.5,\n", + " opacity=0.15,\n", + " opacity_range=(0.0, 1.0),\n", + " render_points_as_spheres=False,\n", + " show_bounds=True\n", + ")\n", + "\n", + "# Advanced point cloud rendering with intensity range and spherical glyphs\n", + "# You can also pass (points, intensities) explicitly\n", + "da.show_point_cloud(\n", + " (data.points, data.intensities),\n", + " clim=(100.0, 50000.0), # controls color scaling\n", + " cmap='jet',\n", + " point_size=2.0,\n", + " opacity=1.0,\n", + " opacity_range=(0.2, 0.8), # intensity-based opacity range\n", + " render_points_as_spheres=True,\n", + " axes_labels=('H','K','L'),\n", + " show_bounds=True\n", + ")\n", + "\n", + "print(f\"Points: {data.points.shape}, Intensities range: {float(np.min(data.intensities)):.2f} - {float(np.max(data.intensities)):.2f}\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create and display 2D slices — multiple modes\n", + "\n", + "Demonstrate canonical planes, custom normals/origins, custom HKL axes, slab thickness, and intensity-based filtering." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 1) Canonical HL plane with grid and HKL labels\n", + "# HL plane: U=H, V=L, normal aligned with K\n", + "sl_hl = da.slice_data(\n", + " data=data,\n", + " hkl='HL', # HL plane preset\n", + " shape=(256, 256), # raster resolution (rows, cols)\n", + " show=True, # display via show_slice\n", + " axis_display='hkl', # formatted HKL labels\n", + " show_grid=True\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 2) Custom plane: specify origin (H,K,L) and normal vector\n", + "sl_custom_plane = da.slice_data(\n", + " data=data,\n", + " hkl=(0.40, 0.25, 0.70), # plane origin in HKL\n", + " normal=(0.1, 0.9, 0.3), # plane normal\n", + " shape=(256, 256),\n", + " show=True,\n", + " axis_display='uv', # display U/V labels\n", + " show_grid=True\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 3) Custom HKL in-plane axes with automatic axis labeling\n", + "# Provide (u_hkl, v_hkl) and optionally normal; labels are formatted and stored on the slice\n", + "sl_axes = da.slice_data(\n", + " data=data,\n", + " axes=((1, 1, 0), (0, 0, 1), (1, -1, 0)), # u = H+K, v = L; normal provided\n", + " shape=(300, 300),\n", + " show=True,\n", + " axis_display='hkl',\n", + " show_grid=True\n", + ")\n", + "try:\n", + " print(\"Stored slice_shape:\", sl_axes.field_data.get('slice_shape'))\n", + " print(\"U-axis label:\", sl_axes.field_data.get('slice_u_label'))\n", + " print(\"V-axis label:\", sl_axes.field_data.get('slice_v_label'))\n", + "except Exception as e:\n", + " print(\"Axis labels not available:\", e)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 4) Thick slab selection around a plane with intensity filtering\n", + "# Use slab_thickness to include points within ±thickness of plane\n", + "sl_slab = da.slice_data(\n", + " data=(data.points, data.intensities),\n", + " hkl='HL', # HL plane\n", + " shape=(512, 512),\n", + " #slab_thickness=3.0, # include points within ±2.0 of plane (remove)\n", + " intensity_range=(11,15), # pre-filter contributing points (remove)\n", + " show=False # we'll render separately with custom display limits\n", + ")\n", + "\n", + "da.show_slice(\n", + " sl_slab,\n", + " cmap='jet',\n", + " clim=(0, 15), # display limits\n", + " min_intensity=None,\n", + " max_intensity=None,\n", + " axis_display='hkl',\n", + " show_grid=True\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Working with returned slice images\n", + "\n", + "You can request a raster image and physical extent from show_slice and reuse them directly for line cuts." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "img, extent = da.show_slice(sl_axes, return_image=True)\n", + "print(\"Returned image shape:\", img.shape)\n", + "print(\"Extent [Umin, Umax, Vmin, Vmax]:\", extent)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Line cuts — presets, custom endpoints, width averaging, interactive mode\n", + "\n", + "Line cuts can operate on a cached image (from the most recent show_slice), on a `(img, extent)` pair, or on a slice mesh. 
Demonstrated below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 1) Horizontal line cut at fixed V (\"zero\" preset)\n", + "lc_zero = da.line_cut(\n", + " 'zero',\n", + " param=(0.0, 'x'), # V fixed at 0.0; traverse U across full extent\n", + " n_samples=256,\n", + " width_px=1,\n", + " show=True # draws overlay + profile\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 2) Vertical line cut at fixed U (\"infinite\" preset) using the returned image directly\n", + "lc_inf = da.line_cut(\n", + " 'infinite',\n", + " param=(0.0, 'y'), # U fixed at 0.0; traverse V across full extent\n", + " vol=(img, extent),\n", + " n_samples=512,\n", + " width_px=3,\n", + " show=True\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 3) Diagonals across the full slice extent\n", + "lc_pos = da.line_cut(\n", + " 'positive',\n", + " n_samples=512,\n", + " width_px=3,\n", + " show=True\n", + ")\n", + "\n", + "lc_neg = da.line_cut(\n", + " 'negative',\n", + " n_samples=512,\n", + " width_px=3,\n", + " show=True\n", + ")\n", + "\n", + "print('Positive cut samples:', len(lc_pos['distance']))\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 4) Custom endpoints in U/V coordinates\n", + "lc_custom = da.line_cut(\n", + " ((-0.25, -0.25), (0.25, 0.40)), # endpoints in physical slice coordinates\n", + " vol=(img, extent),\n", + " n_samples=512,\n", + " width_px=2,\n", + " show=True\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 5) Interactive line cut with draggable endpoints (requires interactive matplotlib backend)\n", + "# Ensure the first cell used '%matplotlib widget' and that 'ipympl' is installed.\n", + "# You can drag the cyan endpoints and the magenta profile updates in real-time.\n", + "lc_interactive = da.line_cut(\n", + " 'positive',\n", + " vol=(img, extent),\n", + " interactive=True,\n", + " n_samples=256,\n", + " width_px=3\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Volume creation and visualization\n", + "\n", + "Convert the point cloud to a structured volume and visualize it, then slice that volume." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create a 3D volume from points\n", + "vol = da.create_vol(data.points, data.intensities)\n", + "print(\"Created volume dimensions:\", vol.dimensions)\n", + "print(\"Volume spacing:\", vol.spacing)\n", + "print(\"Volume origin:\", vol.origin)\n", + "\n", + "# Visualize the volume\n", + "# Tip: try different colormaps like 'plasma', 'viridis', 'jet'\n", + "da.show_vol(vol, cmap='plasma')\n", + "\n", + "# Slice the volume directly (canonical HL plane)\n", + "sl_from_vol = da.slice_data(\n", + " data=vol,\n", + " hkl='HL',\n", + " shape=(400, 400),\n", + " intensity_range=(None, None), # no pre-filtering\n", + " show=True,\n", + " axis_display='hkl',\n", + " show_grid=True\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Enhanced slice visualization options\n", + "\n", + "Use min/max intensity thresholds and clim for display; switch label format; keep per-pixel physical size consistent when changing shape." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Display with custom limits and HKL labels\n", + "_ = da.show_slice(\n", + " sl_from_vol,\n", + " cmap='coolwarm',\n", + " clim=(0, 10000),\n", + " min_intensity=100, # filter out low intensity pixels\n", + " max_intensity=50000, # cap high intensity\n", + " axis_display='hkl',\n", + " show_grid=True,\n", + " shape_data=True # keep physical size per pixel when reshaping\n", + ")\n", + "\n", + "# Display with U/V labels\n", + "_ = da.show_slice(\n", + " sl_from_vol,\n", + " cmap='viridis',\n", + " axis_display='uv',\n", + " show_grid=False\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Image-based workflow recap\n", + "\n", + "Get the image/extent, perform line cuts without re-slicing, and reuse cached data for subsequent operations." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "img2, extent2 = da.show_slice(sl_from_vol, return_image=True)\n", + "lc_from_image = da.line_cut(\n", + " 'zero',\n", + " param=(0.0, 'x'),\n", + " vol=(img2, extent2),\n", + " n_samples=300,\n", + " width_px=3,\n", + " show=True\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Tips & Notes\n", + "\n", + "- Interactive line cuts require an interactive Matplotlib backend. Use `%matplotlib widget` (preferred; requires `ipympl`) or `%matplotlib notebook`.\n", + "- Explore different planes via `hkl` presets ('HK', 'KL', 'HL') or specify `normal=(h,k,l)` and/or a custom origin.\n", + "- Use `axes=((h1,k1,l1), (h2,k2,l2))` to define custom in-plane HKL axes with automatic axis labels. Optionally add a third vector for the plane normal.\n", + "- `slab_thickness` selects points within a thick slice around the plane; combine with `intensity_range=(min, max)` for pre-filtering before interpolation.\n", + "- `show_slice(..., return_image=True)` returns `(img, extent)` for downstream processing (e.g., line cuts) without re-slicing.\n", + "- `create_vol` converts point clouds to a structured volume suitable for volume rendering and volume-based slicing." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Summary\n", + "\n", + "This comprehensive quickstart demonstrated:\n", + "\n", + "1. Loading data and inspecting metadata (dict/text/JSON styles)\n", + "2. 3D point cloud visualization with intensity-based opacity and bounds\n", + "3. Flexible 2D slicing from point clouds and volumes (presets, custom normals/origins, custom axes)\n", + "4. Enhanced slice display controls (labels, grids, intensity thresholds, display limits)\n", + "5. Line cuts with presets, custom endpoints, width averaging, and interactive mode\n", + "6. Volume creation and visualization, followed by slicing and image-based workflows\n", + "\n", + "Use these building blocks to explore complex HKL datasets efficiently and reproducibly." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.14" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/dash_analysis/README.md b/notebooks/dash_analysis/README.md new file mode 100644 index 0000000..99dfea7 --- /dev/null +++ b/notebooks/dash_analysis/README.md @@ -0,0 +1,39 @@ +# Dash Analysis +This is a quick tool for analyzing data + +Download any of the dummy data's for testing + +## Requirements +`python >=3.11` + +## Instructions +1. Download the dash_analysis.ipynb file and this README.md file +into a `` +From [DashPVA/utils](https://github.com/AdvancedPhotonSource/DashPVA/tree/dev/utils) Repo download both `hdf5_loader.py` and `dash_analysis.py`
+*(4 files in total)* + +2. `cd ` + +3. Install uv [Link here](https://github.com/astral-sh/uv?tab=readme-ov-file) + +4. Create uv env +`uv venv` + +5. For Mac/Linux +`source .venv/bin/activate`
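+
+## Quick Start
+
+A minimal sketch of the analysis loop inside the notebook, based on the docstrings in `dash_analysis.py` (the `.h5` path is a placeholder; assumes the flat folder layout above):
+
+```python
+from dash_analysis import DashAnalysis
+
+da = DashAnalysis()
+data = da.load_data('/path/to/file.h5')             # points + intensities (+ metadata)
+vol = da.create_vol(data.points, data.intensities)  # point cloud -> structured volume
+da.show_vol(vol)                                    # interactive 3D view
+sl = da.slice_data(data, hkl='HK')                  # 2D slice, displayed by default
+```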
diff --git a/notebooks/dash_analysis/requirements.txt b/notebooks/dash_analysis/requirements.txt
new file mode 100644
index 0000000..dfd637c
--- /dev/null
+++ b/notebooks/dash_analysis/requirements.txt
@@ -0,0 +1,49 @@
+asttokens==3.0.0
+certifi==2025.10.5
+charset-normalizer==3.4.4
+comm==0.2.3
+contourpy==1.3.3
+cycler==0.12.1
+debugpy==1.8.17
+decorator==5.2.1
+executing==2.2.1
+fonttools==4.60.1
+h5py==3.15.1
+hdf5plugin==6.0.0
+idna==3.11
+ipykernel==7.1.0
+ipython==9.6.0
+ipython-pygments-lexers==1.1.1
+jedi==0.19.2
+jupyter-client==8.6.3
+jupyter-core==5.9.1
+kiwisolver==1.4.9
+matplotlib==3.10.7
+matplotlib-inline==0.2.1
+nest-asyncio==1.6.0
+numpy==2.3.4
+packaging==25.0
+parso==0.8.5
+pexpect==4.9.0
+pillow==12.0.0
+platformdirs==4.5.0
+pooch==1.8.2
+prompt-toolkit==3.0.52
+psutil==7.1.2
+ptyprocess==0.7.0
+pure-eval==0.2.3
+pygments==2.19.2
+pyparsing==3.2.5
+python-dateutil==2.9.0.post0
+pyvista==0.46.4
+pyzmq==27.1.0
+requests==2.32.5
+scooby==0.10.2
+six==1.17.0
+stack-data==0.6.3
+tornado==6.5.2
+traitlets==5.14.3
+typing-extensions==4.15.0
+urllib3==2.5.0
+vtk==9.5.2
+wcwidth==0.2.14
diff --git a/pv_configs/metadata_pvs.toml b/pv_configs/metadata_pvs.toml
index f466959..3fce0f7 100644
--- a/pv_configs/metadata_pvs.toml
+++ b/pv_configs/metadata_pvs.toml
@@ -1,122 +1,133 @@
 # Required Setup
-CONSUMER_TYPE = "spontaneous"
+DETECTOR_PREFIX = 'dp-ADSim'
+OUTPUT_FILE_LOCATION = 'NEW_TEST.h5' # file must end with .h5; can probably be made into its own section
+CONSUMER_MODE = 'continuous' # options: 'continuous' or 'vectorized'; describes how the consumer performs analysis calculations
 
-# Section used specifically for Metadata Pvs
+[CACHE_OPTIONS]
+CACHING_MODE = '' # options: '', 'alignment', 'scan', or 'bin'; describes whether caching runs the entire time, starts when a flag is seen, or groups frames into bins by frame count
+
+    [CACHE_OPTIONS.ALIGNMENT]
+    MAX_CACHE_SIZE = 1000
+
+    [CACHE_OPTIONS.SCAN]
+    # FLAG PV MUST BE IN METADATA SECTION
+    FLAG_PV = 'ScanOn:Value' # a PV that will act as a flag to start caching
+    START_SCAN = true
+    STOP_SCAN = false
+    THRESHOLD = 0.05 # use if start and stop values are not binary/boolean
+    MAX_CACHE_SIZE = 1000
+
+    [CACHE_OPTIONS.BIN]
+    COUNT = 10
+    SIZE = 16
+
+# Section for Metadata PVs
 [METADATA]
 
     [METADATA.CA]
-    X = "x"
-    Y = "y"
+    # FLAG_PV = 'ScanOn:Value'
 
     [METADATA.PVA]
 
-# Section specifically for ROI PVs
-[ROI]
-
-    [ROI.ROI1]
-    MIN_X = "dp-ADSim:ROI1:MinX"
-    MIN_Y = "dp-ADSim:ROI1:MinY"
-    SIZE_X = "dp-ADSim:ROI1:SizeX"
-    SIZE_Y = "dp-ADSim:ROI1:SizeY"
-
-    # [ROI.ROI2]
-    # MIN_X = "dp-ADSim:ROI2:MinX"
-    # MIN_Y = "dp-ADSim:ROI2:MinY"
-    # SIZE_X = "dp-ADSim:ROI2:SizeX"
-    # SIZE_Y = "dp-ADSim:ROI2:SizeY"
-
-    # [ROI.ROI3]
-    # MIN_X = "dp-ADSim:ROI3:MinX"
-    # MIN_Y = "dp-ADSim:ROI3:MinY"
-    # SIZE_X = "dp-ADSim:ROI3:SizeX"
-    # SIZE_Y = "dp-ADSim:ROI3:SizeY"
-
-    [ROI.ROI4]
-    MIN_X = "dp-ADSim:ROI4:MinX"
-    MIN_Y = "dp-ADSim:ROI4:MinY"
-    SIZE_X = "dp-ADSim:ROI4:SizeX"
-    SIZE_Y = "dp-ADSim:ROI4:SizeY"
-
-[STATS]
-
-[STATS.STATS1]
-    TOTAL = "dp-ADSim:Stats1:Total_RBV"
-    MIN = "dp-ADSim:Stats1:MinValue_RBV"
-    MAX = "dp-ADSim:Stats1:MaxValue_RBV"
-    SIGMA = "dp-ADSim:Stats1:Sigma_RBV"
-    MEAN = 
"dp-ADSim:Stats1:MeanValue_RBV" - - [STATS.STATS4] - TOTAL = "dp-ADSim:Stats4:Total_RBV" - MIN = "dp-ADSim:Stats4:MinValue_RBV" - MAX = "dp-ADSim:Stats4:MaxValue_RBV" - SIGMA = "dp-ADSim:Stats4:Sigma_RBV" - MEAN = "dp-ADSim:Stats4:MeanValue_RBV" - -# For use in the analysis server, not on the client side. +# Section for ROI PVs +# [ROI] + +# [ROI.ROI1] +# MIN_X = 'dp-ADSim:ROI1:MinX' +# MIN_Y = 'dp-ADSim:ROI1:MinY' +# SIZE_X = 'dp-ADSim:ROI1:SizeX' +# SIZE_Y = 'dp-ADSim:ROI1:SizeY' + +# [ROI.ROI2] +# MIN_X = 'dp-ADSim:ROI2:MinX' +# MIN_Y = 'dp-ADSim:ROI2:MinY' +# SIZE_X = 'dp-ADSim:ROI2:SizeX' +# SIZE_Y = 'dp-ADSim:ROI2:SizeY' + +# [ROI.ROI3] +# MIN_X = 'dp-ADSim:ROI3:MinX' +# MIN_Y = 'dp-ADSim:ROI3:MinY' +# SIZE_X = 'dp-ADSim:ROI3:SizeX' +# SIZE_Y = 'dp-ADSim:ROI3:SizeY' + +# [ROI.ROI4] +# MIN_X = 'dp-ADSim:ROI4:MinX' +# MIN_Y = 'dp-ADSim:ROI4:MinY' +# SIZE_X = 'dp-ADSim:ROI4:SizeX' +# SIZE_Y = 'dp-ADSim:ROI4:SizeY' + +# Section for Stats PVs +# [STATS] + +# [STATS.STATS1] +# TOTAL = 'dp-ADSim:Stats1:Total_RBV' +# MIN = 'dp-ADSim:Stats1:MinValue_RBV' +# MAX = 'dp-ADSim:Stats1:MaxValue_RBV' +# SIGMA = 'dp-ADSim:Stats1:Sigma_RBV' +# MEAN = 'dp-ADSim:Stats1:MeanValue_RBV' + +# [STATS.STATS4] +# TOTAL = 'dp-ADSim:Stats4:Total_RBV' +# MIN = 'dp-ADSim:Stats4:MinValue_RBV' +# MAX = 'dp-ADSim:Stats4:MaxValue_RBV' +# SIGMA = 'dp-ADSim:Stats4:Sigma_RBV' +# MEAN = 'dp-ADSim:Stats4:MeanValue_RBV' + +# Section for Analysis PVs +# These PVs are used in the analysis server, not on the User side. [ANALYSIS] - # substitute with real PVs that are also in Metadata - AXIS1 = "x" - AXIS2 = "y" + # Make sure to use PVs that are also in METADATA section + AXIS1 = 'x' + AXIS2 = 'y' [HKL] [HKL.SAMPLE_CIRCLE_AXIS_1] AXIS_NUMBER = '6idb1:m28_RBV:AxisNumber' DIRECTION_AXIS = '6idb1:m28_RBV:DirectionAxis' - NAME = '6idb1:m28_RBV:Name' + # NAME = '6idb1:m28_RBV:Name' POSITION = '6idb1:m28_RBV:Position' - SPEC_MOTOR_NAME = '6idb1:m28_RBV:SpecMotorName' + # SPEC_MOTOR_NAME = '6idb1:m28_RBV:SpecMotorName' [HKL.SAMPLE_CIRCLE_AXIS_2] AXIS_NUMBER = '6idb1:m17_RBV:AxisNumber' DIRECTION_AXIS = '6idb1:m17_RBV:DirectionAxis' - NAME = '6idb1:m17_RBV:Name' + # NAME = '6idb1:m17_RBV:Name' POSITION = '6idb1:m17_RBV:Position' - SPEC_MOTOR_NAME = '6idb1:m17_RBV:SpecMotorName' + # SPEC_MOTOR_NAME = '6idb1:m17_RBV:SpecMotorName' [HKL.SAMPLE_CIRCLE_AXIS_3] AXIS_NUMBER = '6idb1:m19_RBV:AxisNumber' DIRECTION_AXIS = '6idb1:m19_RBV:DirectionAxis' - NAME = '6idb1:m19_RBV:Name' + # NAME = '6idb1:m19_RBV:Name' POSITION = '6idb1:m19_RBV:Position' - SPEC_MOTOR_NAME = '6idb1:m19_RBV:SpecMotorName' + # SPEC_MOTOR_NAME = '6idb1:m19_RBV:SpecMotorName' [HKL.SAMPLE_CIRCLE_AXIS_4] AXIS_NUMBER = '6idb1:m20_RBV:AxisNumber' DIRECTION_AXIS = '6idb1:m20_RBV:DirectionAxis' - NAME = '6idb1:m20_RBV:Name' + # NAME = '6idb1:m20_RBV:Name' POSITION = '6idb1:m20_RBV:Position' - SPEC_MOTOR_NAME = '6idb1:m20_RBV:SpecMotorName' + # SPEC_MOTOR_NAME = '6idb1:m20_RBV:SpecMotorName' [HKL.DETECTOR_CIRCLE_AXIS_1] AXIS_NUMBER = '6idb1:m29_RBV:AxisNumber' DIRECTION_AXIS = '6idb1:m29_RBV:DirectionAxis' - NAME = '6idb1:m29_RBV:Name' + # NAME = '6idb1:m29_RBV:Name' POSITION = '6idb1:m29_RBV:Position' - SPEC_MOTOR_NAME = '6idb1:m29_RBV:SpecMotorName' + # SPEC_MOTOR_NAME = '6idb1:m29_RBV:SpecMotorName' [HKL.DETECTOR_CIRCLE_AXIS_2] AXIS_NUMBER = '6idb1:m18_RBV:AxisNumber' DIRECTION_AXIS = '6idb1:m18_RBV:DirectionAxis' - NAME = '6idb1:m18_RBV:Name' + # NAME = '6idb1:m18_RBV:Name' POSITION = '6idb1:m18_RBV:Position' - SPEC_MOTOR_NAME = '6idb1:m18_RBV:SpecMotorName' + # 
SPEC_MOTOR_NAME = '6idb1:m18_RBV:SpecMotorName'
 
     [HKL.SPEC]
     ENERGY_VALUE = '6idb:spec:Energy:Value'
-#TODO: make RSM consumer
-# These values will not change often so they can be changed to values and read in by the rsm consumer
     UB_MATRIX_VALUE = '6idb:spec:UB_matrix:Value'
 
-    [HKL.DETECTOR_SETUP]
-    CENTER_CHANNEL_PIXEL = 'DetectorSetup:CenterChannelPixel'
-    DISTANCE = 'DetectorSetup:Distance'
-    PIXEL_DIRECTION_1 = 'DetectorSetup:PixelDirection1'
-    PIXEL_DIRECTION_2 = 'DetectorSetup:PixelDirection2'
-    SIZE = 'DetectorSetup:Size'
-    UNITS = 'DetectorSetup:Units'
-
     [HKL.PRIMARY_BEAM_DIRECTION]
     AXIS_NUMBER_1 = 'PrimaryBeamDirection:AxisNumber1'
     AXIS_NUMBER_2 = 'PrimaryBeamDirection:AxisNumber2'
@@ -131,3 +142,13 @@ CONSUMER_TYPE = "spontaneous"
     AXIS_NUMBER_1 = 'SampleSurfaceNormalDirection:AxisNumber1'
     AXIS_NUMBER_2 = 'SampleSurfaceNormalDirection:AxisNumber2'
     AXIS_NUMBER_3 = 'SampleSurfaceNormalDirection:AxisNumber3'
+
+    [HKL.DETECTOR_SETUP]
+    CENTER_CHANNEL_PIXEL = 'DetectorSetup:CenterChannelPixel'
+    DISTANCE = 'DetectorSetup:Distance'
+    PIXEL_DIRECTION_1 = 'DetectorSetup:PixelDirection1'
+    PIXEL_DIRECTION_2 = 'DetectorSetup:PixelDirection2'
+    SIZE = 'DetectorSetup:Size'
+    UNITS = 'DetectorSetup:Units'
+
+
diff --git a/pv_configs/sample_config.toml b/pv_configs/sample_config.toml
new file mode 100644
index 0000000..59f6f1e
--- /dev/null
+++ b/pv_configs/sample_config.toml
@@ -0,0 +1,156 @@
+# Required Setup
+DETECTOR_PREFIX = 'prefix'
+OUTPUT_FILE_LOCATION = 'OUTPUT.h5' # file must end with .h5; can probably be made into its own section
+CONSUMER_MODE = 'continuous' # options: 'continuous' or 'vectorized'; describes how the consumer performs analysis calculations
+
+[CACHE_OPTIONS]
+CACHING_MODE = 'alignment' # options: '', 'alignment', 'scan', or 'bin'; describes whether caching runs the entire time, starts when a flag is seen, or groups frames into bins by frame count
+
+    [CACHE_OPTIONS.ALIGNMENT]
+    MAX_CACHE_SIZE = 1000
+
+    [CACHE_OPTIONS.SCAN]
+    # FLAG PV MUST BE IN METADATA SECTION
+    # FLAG_PV = 'ScanOn:Value' # a PV that will act as a flag to start caching
+    START_SCAN = true
+    STOP_SCAN = false
+    THRESHOLD = 0.05 # use if start and stop values are not binary/boolean
+    MAX_CACHE_SIZE = 1000
+
+    [CACHE_OPTIONS.BIN]
+    COUNT = 10
+    SIZE = 16
+
+# Section for Metadata PVs
+[METADATA]
+
+    [METADATA.CA]
+    FLAG_PV = 'ScanOn:Value'
+    FILE_PATH = 'FilePath:Value'
+    FILE_NAME = 'FileName:Value'
+
+    [METADATA.PVA]
+
+# Section for ROI PVs
+[ROI]
+
+    [ROI.ROI1]
+    MIN_X = 'dp-ADSim:ROI1:MinX'
+    MIN_Y = 'dp-ADSim:ROI1:MinY'
+    SIZE_X = 'dp-ADSim:ROI1:SizeX'
+    SIZE_Y = 'dp-ADSim:ROI1:SizeY'
+
+    [ROI.ROI2]
+    MIN_X = 'dp-ADSim:ROI2:MinX'
+    MIN_Y = 'dp-ADSim:ROI2:MinY'
+    SIZE_X = 'dp-ADSim:ROI2:SizeX'
+    SIZE_Y = 'dp-ADSim:ROI2:SizeY'
+
+    [ROI.ROI3]
+    MIN_X = 'dp-ADSim:ROI3:MinX'
+    MIN_Y = 'dp-ADSim:ROI3:MinY'
+    SIZE_X = 'dp-ADSim:ROI3:SizeX'
+    SIZE_Y = 'dp-ADSim:ROI3:SizeY'
+
+    [ROI.ROI4]
+    MIN_X = 'dp-ADSim:ROI4:MinX'
+    MIN_Y = 'dp-ADSim:ROI4:MinY'
+    SIZE_X = 'dp-ADSim:ROI4:SizeX'
+    SIZE_Y = 'dp-ADSim:ROI4:SizeY'
+
+# Section for Stats PVs
+[STATS]
+
+    [STATS.STATS1]
+    TOTAL = 'dp-ADSim:Stats1:Total_RBV'
+    MIN = 'dp-ADSim:Stats1:MinValue_RBV'
+    MAX = 'dp-ADSim:Stats1:MaxValue_RBV'
+    SIGMA = 'dp-ADSim:Stats1:Sigma_RBV'
+    MEAN = 'dp-ADSim:Stats1:MeanValue_RBV'
+
+    [STATS.STATS4]
+    TOTAL = 'dp-ADSim:Stats4:Total_RBV'
+    MIN = 'dp-ADSim:Stats4:MinValue_RBV'
+    MAX = 'dp-ADSim:Stats4:MaxValue_RBV'
+    SIGMA = 'dp-ADSim:Stats4:Sigma_RBV'
+    MEAN = 'dp-ADSim:Stats4:MeanValue_RBV'
+
+# Section 
for Analysis PVs +# These PVs are used in the analysis server, not on the User side. +[ANALYSIS] + # Make sure to use PVs that are also in METADATA section + AXIS1 = 'x' + AXIS2 = 'y' + +[HKL] + + [HKL.SAMPLE_CIRCLE_AXIS_1] + AXIS_NUMBER = 'detector:motor:AxisNumber' + DIRECTION_AXIS = 'detector:motor:DirectionAxis' + # NAME = 'detector:motor:Name' + POSITION = 'detector:motor:Position' + # SPEC_MOTOR_NAME = 'detector:motor:SpecMotorName' + + [HKL.SAMPLE_CIRCLE_AXIS_2] + AXIS_NUMBER = 'detector:motor:AxisNumber' + DIRECTION_AXIS = 'detector:motor:DirectionAxis' + # NAME = 'detector:motor:Name' + POSITION = 'detector:motor:Position' + # SPEC_MOTOR_NAME = 'detector:motor:SpecMotorName' + + [HKL.SAMPLE_CIRCLE_AXIS_3] + AXIS_NUMBER = 'detector:motor:AxisNumber' + DIRECTION_AXIS = 'detector:motor:DirectionAxis' + # NAME = 'detector:motor:Name' + POSITION = 'detector:motor:Position' + # SPEC_MOTOR_NAME = 'detector:motor:SpecMotorName' + + [HKL.SAMPLE_CIRCLE_AXIS_4] + AXIS_NUMBER = 'detector:motor:AxisNumber' + DIRECTION_AXIS = 'detector:motor:DirectionAxis' + # NAME = 'detector:motor:Name' + POSITION = 'detector:motor:Position' + # SPEC_MOTOR_NAME = 'detector:motor:SpecMotorName' + + [HKL.DETECTOR_CIRCLE_AXIS_1] + AXIS_NUMBER = 'detector:motor:AxisNumber' + DIRECTION_AXIS = 'detector:motor:DirectionAxis' + # NAME = 'detector:motor:Name' + POSITION = 'detector:motor:Position' + # SPEC_MOTOR_NAME = 'detector:motor:SpecMotorName' + + [HKL.DETECTOR_CIRCLE_AXIS_2] + AXIS_NUMBER = 'detector:motor:AxisNumber' + DIRECTION_AXIS = 'detector:motor:DirectionAxis' + # NAME = 'detector:motor:Name' + POSITION = 'detector:motor:Position' + # SPEC_MOTOR_NAME = 'detector:motor:SpecMotorName' + + [HKL.SPEC] + ENERGY_VALUE = 'detector:spec:Energy:Value' + UB_MATRIX_VALUE = 'detector:spec:UB_matrix:Value' + + [HKL.PRIMARY_BEAM_DIRECTION] + AXIS_NUMBER_1 = 'PrimaryBeamDirection:AxisNumber1' + AXIS_NUMBER_2 = 'PrimaryBeamDirection:AxisNumber2' + AXIS_NUMBER_3 = 'PrimaryBeamDirection:AxisNumber3' + + [HKL.INPLANE_REFERENCE_DIRECITON] + AXIS_NUMBER_1 = 'InplaneReferenceDirection:AxisNumber1' + AXIS_NUMBER_2 = 'InplaneReferenceDirection:AxisNumber2' + AXIS_NUMBER_3 = 'InplaneReferenceDirection:AxisNumber3' + + [HKL.SAMPLE_SURFACE_NORMAL_DIRECITON] + AXIS_NUMBER_1 = 'SampleSurfaceNormalDirection:AxisNumber1' + AXIS_NUMBER_2 = 'SampleSurfaceNormalDirection:AxisNumber2' + AXIS_NUMBER_3 = 'SampleSurfaceNormalDirection:AxisNumber3' + + [HKL.DETECTOR_SETUP] + CENTER_CHANNEL_PIXEL = 'DetectorSetup:CenterChannelPixel' + DISTANCE = 'DetectorSetup:Distance' + PIXEL_DIRECTION_1 = 'DetectorSetup:PixelDirection1' + PIXEL_DIRECTION_2 = 'DetectorSetup:PixelDirection2' + SIZE = 'DetectorSetup:Size' + UNITS = 'DetectorSetup:Units' + + diff --git a/pva_setup/pva_workflow_setup_dialog.py b/pva_setup/pva_workflow_setup_dialog.py index 6bf489f..87b58c7 100755 --- a/pva_setup/pva_workflow_setup_dialog.py +++ b/pva_setup/pva_workflow_setup_dialog.py @@ -4,9 +4,16 @@ import os import signal import toml +import json from PyQt5 import QtWidgets, uic, QtCore -from PyQt5.QtWidgets import QFileDialog, QDialog +from PyQt5.QtWidgets import QFileDialog, QDialog, QTextEdit, QTreeWidgetItem, QHeaderView from PyQt5.QtCore import pyqtSignal, QObject +from datetime import datetime +import sys +import pathlib +sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) +from utils import PVAReader +from functools import partial class Worker(QObject): """ @@ -92,6 +99,8 @@ def browse_config_upload(self): file_name, _ = QFileDialog.getOpenFileName(self, 
'Select Metadata Config File', '', 'TOML Files (*.toml)')
         if file_name:
             self.lineEditConfigUploadPath.setText(file_name)
+            # Update current mode label based on uploaded TOML
+            self.update_current_mode_label(file_name)
 
     # Browse functions for AssociatorConsumers
     def browse_processor_file_associator(self):
@@ -157,6 +166,23 @@ def parse_toml(self, path) -> dict:
         with open(path, 'r') as f:
             toml_data: dict = toml.load(f)
         return toml_data
+
+    def update_current_mode_label(self, path: str) -> None:
+        """
+        Parses the provided TOML file and updates the 'Current Mode' label
+        on the Config Upload tab with CACHE_OPTIONS.CACHING_MODE.
+        """
+        text = '(none)'
+        try:
+            toml_data = self.parse_toml(path)
+            mode = toml_data.get('CACHE_OPTIONS', {}).get('CACHING_MODE', '')
+            text = mode if mode else '(none)'
+        except Exception:
+            text = '(error)'
+        try:
+            self.labelCurrentModeValue.setText(text)
+        except Exception:
+            pass
 
     def parse_metadata_channels(self, metadata_config_path) -> str:
         """
@@ -170,18 +196,25 @@ def parse_metadata_channels(self, metadata_config_path) -> str:
         """
         pv_config = self.parse_toml(path=metadata_config_path)
         metadata_config : dict = pv_config.get("METADATA", {})
+        hkl_config : dict = pv_config.get('HKL', {})
+        ca_pvs = ""
+        pva_pvs = ""
         if metadata_config and metadata_config is not None:
             ca = metadata_config.get("CA", {})
             pva = metadata_config.get("PVA", {})
-            ca_pvs = ""
-            pva_pvs = ""
+
             if ca:
-                for value in list(ca.values()):
+                for value in ca.values():
                     ca_pvs += f"ca://{value},"
             if pva:
-                for value in list(pva.values()):
+                for value in pva.values():
                     pva_pvs += f"pva://{value},"
+
+        if hkl_config and hkl_config is not None:
+            for pvs_dict in hkl_config.values():
+                for pv_channel in pvs_dict.values():
+                    ca_pvs += f"ca://{pv_channel},"
 
         all_pvs = ca_pvs.strip(',') if not(pva_pvs) else ca_pvs + pva_pvs.strip(',')
         return all_pvs
@@ -305,6 +338,7 @@
             '--server-queue-size', str(self.spinBoxServerQueueSizeAssociator.value()),
             '--n-consumers', str(self.spinBoxNConsumersAssociator.value()),
             '--distributor-updates', str(self.spinBoxDistributorUpdatesAssociator.value()),
+            '-dc'
         ]
 
         # Add metadata config file if specified
@@ -314,6 +348,9 @@
         if metadata_pvs:
             cmd.extend(['--metadata-channels', metadata_pvs])
 
+        # print(' '.join(cmd))
+
+
         try:
             process = subprocess.Popen(
                 cmd,
@@ -326,7 +363,8 @@
 
             # Start thread to read output
             worker = Worker(process)
-            worker.output_signal.connect(self.textEditAssociatorConsumersOutput.appendPlainText)
+            output_format = partial(self._format_and_append_output, target_widget=self.textEditAssociatorConsumersOutput)
+            worker.output_signal.connect(output_format)
             thread = threading.Thread(target=worker.run)
             thread.daemon = True
             thread.start()
@@ -354,6 +392,32 @@
         self.buttonStopAssociatorConsumers.setEnabled(False)
         self.labelStatusAssociatorConsumers.setText('Process ID: Not running')
         self.textEditAssociatorConsumersOutput.appendPlainText('Associator Consumers stopped.')
+
+    def _format_and_append_output(self, text: str, target_widget: QTextEdit):
+        """
+        Receives text, formats it with HTML, and appends it to the specified widget.
+        """
+        # Get a timestamp for logging
+        timestamp = datetime.now().strftime('%H:%M:%S')
+
+        # Escape the text so it is safe to insert into HTML
+        safe_text = text.replace('<', '&lt;').replace('>', '&gt;')
+
+        # Apply color based on keywords
+        color = "#000000"  # default text color (adjust to match the theme)
+        if "ERROR" in text.upper():
+            color = "#FF5733"  # red
+        elif "WARNING" in text.upper():
+            color = "#FFC300"  # orange
+        elif "SUCCESS" in text.upper() or "done" in text.lower():
+            color = "#33FF57"  # green
+
+        formatted_line = f'<font color="{color}">{timestamp} {safe_text}</font>'
+
+        # The target widgets are QPlainTextEdit instances, which provide appendHtml()
+        target_widget.appendHtml(formatted_line)
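+
+    # Illustration (not executed; the markup shown is what the method above builds):
+    #   _format_and_append_output("ERROR: connection refused", some_widget)
+    #   appends roughly: <font color="#FF5733">14:05:02 ERROR: connection refused</font>
+    #   where "14:05:02" is the current HH:MM:SS timestamp.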
+ """ + # Get a timestamp for logging + timestamp = datetime.now().strftime('%H:%M:%S') + + # Make the text safe to insert into HTML + safe_text = text.replace('<', '<').replace('>', '>') + + # Apply color based on keywords + color = "#000000" # Default to white (or your theme's default) + if "ERROR" in text.upper(): + color = "#FF5733" # Red + elif "WARNING" in text.upper(): + color = "#FFC300" # Orange + elif "SUCCESS" in text.upper() or "done" in text.lower(): + color = "#33FF57" # Green + #xmessage = safe_text['processorStatus'] + formatted_line = f"{timestamp} {safe_text}" + + target_widget.appendHtml(formatted_line) # Run and Stop functions for Collector def run_collector(self): @@ -366,10 +430,13 @@ def run_collector(self): QtWidgets.QMessageBox.warning(self, 'Warning', 'Collector is already running.') return + producer_id_list = [str(i) for i in range(1, int(self.lineEditProducerIdList.text())+1)] + producer_id_list = ','.join(producer_id_list) + cmd = [ 'pvapy-hpc-collector', '--collector-id', str(self.spinBoxCollectorId.value()), - '--producer-id-list', self.lineEditProducerIdList.text(), + '--producer-id-list', producer_id_list, '--input-channel', self.lineEditInputChannelCollector.text(), '--control-channel', self.lineEditControlChannelCollector.text(), '--status-channel', self.lineEditStatusChannelCollector.text(), @@ -449,7 +516,10 @@ def run_analysis_consumer(self): '--processor-file', self.lineEditProcessorFileAnalysis.text(), '--processor-class', self.lineEditProcessorClassAnalysis.text(), '--report-period', str(self.spinBoxReportPeriodAnalysis.value()), - '--server-queue-size', str(self.spinBoxServerQueueSizeAnalysis.value()) + '--server-queue-size', str(self.spinBoxServerQueueSizeAnalysis.value()), + '--n-consumers', str(self.spinBoxNConsumersAnalysis.value()), + '--distributor-updates', str(self.spinBoxDistributorUpdatesAnalysis.value()), + '-dc' ] # Add metadata config file if specified @@ -498,6 +568,68 @@ def stop_analysis_consumer(self): self.labelStatusAnalysisConsumer.setText('Process ID: Not running') self.textEditAnalysisConsumerOutput.appendPlainText('Analysis Consumer stopped.') + def show_performance(self): + if not self.performance_dialog: + self.performance_dialog = PerformanceDialog( + parent=self, + status_channel=self.lineEditStatusChannelAnalysis.text(), + data_channel=self.lineEditOutputChannelAnalysis.text() + ) + self.performance_dialog.show() + self.performance_dialog.raise_() # Bring window to the front + self.performance_dialog.activateWindow() + + def fetch_and_print_stats(self, consumer_id: int = 1, status_channel_base: str = 'processor:*:status') -> dict: + """ + Fetches, unpacks, and prints processor stats from a PVA status channel. + + Args: + consumer_id (int): The ID of the consumer to get stats from. + status_channel_base (str): The base name of the status channel. + + Returns: + dict: A dictionary containing the unpacked stats. 
+ """ + print('Printing stats') + channel_name = status_channel_base.replace('*', str(consumer_id)) + + try: + # Use a temporary channel to get the stats just once + stats_channel = PVAReader(input_channel=channel_name) + pv_object = stats_channel.channel.get() + + # Check for the existence of the stats dictionary + if 'userStats' not in pv_object: + print(f"Warning: 'userStats' not found in PV from {channel_name}") + return {} + + # Unpack the userStats dictionary + stats = { + key: val.value if hasattr(val, 'value') else val + for key, val in pv_object['userStats'].items() + } + + # Write stats to a file for analysis + with open('stats_output.txt', 'w') as f: + f.write(f"Number of frames processed: {self.spinBoxNf.value()}\n") + f.write(f"Hertz: {self.spinBoxFps.value()}\n") + f.write(f"Image size: {self.spinBoxNx.value()} x {self.spinBoxNy.value()}\n\n") + f.write(f"Latency Stats from {channel_name}:\n{pv_object}\n\n") + + f.write("**** Metadata ****\n") + f.write(f"First Frame ID: {first_frame_id}\n") + f.write(f"First Frame Internal Processing Time: {first_frame_time:.5f} ms\n") + f.write(f"First Frame Timestamp: {first_frame_timestamp}\n") + + return stats + + except Exception as e: + import traceback + with open('error_output.txt', 'w') as f: + f.write(f"Error getting stats from {channel_name}:\n") + f.write(traceback.format_exc()) + return {} + def closeEvent(self, event): """ Handles the dialog close event by terminating all active subprocesses. diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..21c0dff --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,87 @@ +[project] +name = "DashPVA" +version = "0.1.0" +description = "Distributed Analysis and Streaming Hub with Process Variable Access" +requires-python = ">=3.11,<3.12" +dependencies = [ + "numpy>=2.2.0", + "scipy>=1.15.0", + "pyqt5>=5.15.0", + "pyqtgraph>=0.13.0", + "h5py>=3.12.0", + "xrayutilities>=1.7.0", + "pyepics>=3.5.0", + "pvapy>=5.5.0", + "toml>=0.10.0", + "dash>=2.18.0", + "dash-core-components>=2.0.0", + "dash-html-components>=2.0.0", + "dash-table>=5.0.0", + "matplotlib>=3.10.0", + "pandas>=2.2.0", + "flask>=3.1.0", + "requests>=2.32.0", + "pyyaml>=6.0.0", + "pyvista[jupyter]>=0.44.0", + "pyvistaqt>=0.11.0", + "open3d>=0.19.0", + "scikit-learn>=1.6.0", + "ipython>=9.0.0", + "ipywidgets>=8.1.0", + "jupyter-core>=5.7.0", + "jupyterlab-widgets>=3.0.0", + "joblib>=1.4.0", + "tqdm>=4.67.0", + "configargparse>=1.7", + "addict>=2.4.0", + "blosc2>=3.2.0", + "bitshuffle>=0.5.0", + "lz4>=4.4.0", + "numexpr>=2.10.0", + "lmfit>=1.3.0", + "uncertainties>=3.2.0", + "qtpy>=2.4.0", + "pyquaternion>=0.9.0", + "pytz>=2025.1", + "superqt>=0.6.7", + "scikit-image>=0.22.0", + "opencv-python>=4.8.0", + "fabio>=2024.0.0", + "pillow>=12.0.0", + "hdf5plugin>=4.0.0", + "trame>=3.12.0", + "ipympl>=0.9.8", + "sqlalchemy>=2.0.44", + "markdown>=3.10", + "jupyter>=1.1.1", + "qtawesome>=1.4.0", + "torch>=2.9.1", + "torchvision>=0.24.1", + "segment-anything", + "pycocotools>=2.0.11", + "onnxruntime>=1.23.2", + "onnx>=1.20.0", + "deepinv>=0.3.7", + "click>=8.1.7", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.uv] + +[tool.uv.sources] +segment-anything = { git = "https://github.com/facebookresearch/segment-anything.git" } +# dev-dependencies moved to dependency-groups (see below) + +[project.dependency-groups] +dev = [ + "pytest>=7.0.0", + "black>=23.0.0", + "ruff>=0.1.0", +] + +# Note: pvapy is available on PyPI and can be 
diff --git a/settings.py b/settings.py
new file mode 100644
index 0000000..034109e
--- /dev/null
+++ b/settings.py
@@ -0,0 +1,428 @@
+"""
+Centralized settings module for DashPVA.
+
+Exports constants resolved from the currently selected configuration source
+(TOML file or DB profile), with helper functions to control the locator and
+refresh values. By default it uses the database-selected profile in dashpva.db.
+If a TOML path is provided (via set_locator or the DASPVA_CONFIG_LOCATOR env var),
+it will use that TOML instead.
+
+Usage:
+    - Programmatic selection:
+        import settings
+        settings.set_locator('/path/to/config.toml')  # or an int profile_id, or "profile:<name>"
+        settings.reload()
+
+    - Optional env var override:
+        export DASPVA_CONFIG_LOCATOR=profile:my_profile
+        # or export DASPVA_CONFIG_LOCATOR=/abs/path/config.toml
+        # or export DASPVA_CONFIG_LOCATOR=42 (profile id)
+
+    - Diagnostics:
+        settings.SOURCE_TYPE -> "toml" or "db"
+        settings.LOCATOR -> the current locator (str path, "profile:<name>", or int id)
+        settings.CONFIG -> full configuration dictionary
+        settings.ensure_path() -> a TOML path (original path or temp file when using DB)
+"""
+
+from typing import Any, Dict, Optional, Union
+import os
+import tempfile
+import toml
+from pathlib import Path
+
+from utils.config.repository import ConfigRepository
+from utils.config.interfaces import ConfigSource
+from database import DatabaseInterface
+
+
+# User variables
+BEAMLINE_NAME: Optional[str] = None
+
+# Core
+PROJECT_ROOT = Path(__file__).resolve().parent
+DETECTOR_PREFIX: Optional[str] = None
+OUTPUT_FILE_LOCATION: Optional[str] = None
+CONSUMER_MODE: Optional[str] = None
+
+# Cache + convenience
+CACHING_MODE: Optional[str] = None
+CACHE_OPTIONS: Dict[str, Any] = {}
+ALIGNMENT_MAX_CACHE_SIZE: Optional[int] = None
+SCAN_FLAG_PV: Optional[str] = None
+SCAN_START_SCAN: Optional[bool] = None
+SCAN_STOP_SCAN: Optional[bool] = None
+SCAN_THRESHOLD: Optional[float] = None
+SCAN_MAX_CACHE_SIZE: Optional[int] = None
+BIN_COUNT: Optional[int] = None
+BIN_SIZE: Optional[int] = None
+
+# Sections
+METADATA_CA: Dict[str, Any] = {}
+METADATA_PVA: Dict[str, Any] = {}
+ROI: Dict[str, Any] = {}
+STATS: Dict[str, Any] = {}
+HKL: Dict[str, Any] = {}
+ANALYSIS: Dict[str, Any] = {}
+
+# AppSettings
+LOG_PATH: Optional[str] = None
+OUTPUT_PATH: Optional[str] = None
+CONFIG_PATH: Optional[str] = None
+CONSUMERS_PATH: Optional[str] = None
+
+# Diagnostics
+CONFIG: Dict[str, Any] = {}
+SOURCE_TYPE: Optional[str] = None
+LOCATOR: Optional[Union[int, str]] = None
+
+# Internal state
+_repo = ConfigRepository(DatabaseInterface())
+_default_toml = 'pv_configs/sample_config.toml'
+_locator_internal: Optional[Union[int, str]] = None
+
+
+def set_locator(locator: Union[int, str]) -> None:
+    """Set the configuration locator (TOML path, "profile:<name>", or int profile_id)."""
+    global _locator_internal
+    _locator_internal = locator
+
+
+def ensure_path() -> Optional[str]:
+    """Return a TOML path (real path for TOML; temp file for DB) via ConfigRepository.ensure_path."""
+    eff = _get_effective_locator()
+    return _repo.ensure_path(eff)
+
+
+def reload() -> None:
+    """Re-resolve current LOCATOR and repopulate all exported constants from the configuration source."""
+    global CONFIG, SOURCE_TYPE, LOCATOR
+    global DETECTOR_PREFIX, OUTPUT_FILE_LOCATION, CONSUMER_MODE
+    global CACHING_MODE, CACHE_OPTIONS, ALIGNMENT_MAX_CACHE_SIZE
+    global SCAN_FLAG_PV, SCAN_START_SCAN, SCAN_STOP_SCAN, SCAN_THRESHOLD, SCAN_MAX_CACHE_SIZE
+    global BIN_COUNT, BIN_SIZE
+    global METADATA_CA, METADATA_PVA, ROI, STATS, HKL, ANALYSIS
+
+    eff = _get_effective_locator()
+    LOCATOR = eff
+    cfg = _repo.load(eff) if eff is not None else {}
+    CONFIG = cfg
+
+    # Source type
+    src = _repo.resolve(eff)
+    SOURCE_TYPE = getattr(src, 'source_type', None) if src else None
+
+    # Core
+    DETECTOR_PREFIX = cfg.get('DETECTOR_PREFIX')
+    OUTPUT_FILE_LOCATION = cfg.get('OUTPUT_FILE_LOCATION')
+    CONSUMER_MODE = cfg.get('CONSUMER_MODE')
+
+    # Cache and convenience
+    CACHE_OPTIONS = cfg.get('CACHE_OPTIONS', {}) or {}
+    CACHING_MODE = CACHE_OPTIONS.get('CACHING_MODE')
+
+    # ALIGNMENT
+    ALIGNMENT_MAX_CACHE_SIZE = None
+    try:
+        ALIGNMENT_MAX_CACHE_SIZE = int(CACHE_OPTIONS.get('ALIGNMENT', {}).get('MAX_CACHE_SIZE'))
+    except Exception:
+        pass
+
+    # SCAN
+    scan = CACHE_OPTIONS.get('SCAN', {}) or {}
+    # Flag PV comes from METADATA.CA.FLAG_PV
+    SCAN_FLAG_PV = (cfg.get('METADATA', {}).get('CA', {}) or {}).get('FLAG_PV')
+    try:
+        SCAN_START_SCAN = bool(scan.get('START_SCAN')) if scan.get('START_SCAN') is not None else None
+    except Exception:
+        SCAN_START_SCAN = None
+    try:
+        SCAN_STOP_SCAN = bool(scan.get('STOP_SCAN')) if scan.get('STOP_SCAN') is not None else None
+    except Exception:
+        SCAN_STOP_SCAN = None
+    try:
+        SCAN_THRESHOLD = float(scan.get('THRESHOLD')) if scan.get('THRESHOLD') is not None else None
+    except Exception:
+        SCAN_THRESHOLD = None
+    try:
+        SCAN_MAX_CACHE_SIZE = int(scan.get('MAX_CACHE_SIZE')) if scan.get('MAX_CACHE_SIZE') is not None else None
+    except Exception:
+        SCAN_MAX_CACHE_SIZE = None
+
+    # BIN
+    bin_opts = CACHE_OPTIONS.get('BIN', {}) or {}
+    try:
+        BIN_COUNT = int(bin_opts.get('COUNT')) if bin_opts.get('COUNT') is not None else None
+    except Exception:
+        BIN_COUNT = None
+    try:
+        BIN_SIZE = int(bin_opts.get('SIZE')) if bin_opts.get('SIZE') is not None else None
+    except Exception:
+        BIN_SIZE = None
+
+    # Sections
+    metadata = cfg.get('METADATA', {}) or {}
+    METADATA_CA = metadata.get('CA', {}) or {}
+    METADATA_PVA = metadata.get('PVA', {}) or {}
+
+    ROI = cfg.get('ROI', {}) or {}
+    STATS = cfg.get('STATS', {}) or {}
+    HKL = cfg.get('HKL', {}) or {}
+    ANALYSIS = cfg.get('ANALYSIS', {}) or {}
+
+
+def _get_effective_locator() -> Union[int, str, None]:
+    """Determine the effective locator by precedence: set_locator -> env var -> None (callers may fall back to _default_toml explicitly)."""
+    # 1) Programmatic locator via set_locator (primary when Browse runs)
+    if _locator_internal is not None:
+        return _locator_internal
+
+    # 2) Optional override via environment variable
+    env_locator = os.getenv('DASPVA_CONFIG_LOCATOR')
+    if env_locator and env_locator.strip():
+        loc = env_locator.strip()
+        if loc.isdigit():
+            return int(loc)
+        return loc
+
+    return None
+
+
+# Initialize on import
+reload()
+
+
+class DictConfigSource(ConfigSource):
+    """
+    In-memory configuration source backed by a Python dict.
+    Useful for constructing Settings from an already-built configuration object.
+    """
+    def __init__(self, data: Dict[str, Any]):
+        self._data = dict(data)
+        self.source_type = "dict"
+
+    def load(self) -> Dict[str, Any]:
+        return dict(self._data)
+
+    def save(self, update: Dict[str, Any]) -> bool:
+        try:
+            self._data.clear()
+            self._data.update(update or {})
+            return True
+        except Exception:
+            return False
+
+
+class Settings:
+    """
+    Object-oriented settings container.
+
+    Can be constructed from:
+    - TOML path (str)
+    - DB profile id (int)
+    - "profile:<name>" (str)
+    - ConfigSource instance (TomlConfigSource, DbProfileConfigSource, etc.)
+    - Plain dict via Settings.from_dict()
+
+    Examples:
+        s1 = Settings.from_toml('pv_configs/sample_config.toml')
+        s2 = Settings.from_profile_id(42)
+        s3 = Settings.from_profile_name('my_profile')
+        s4 = Settings.from_source(TomlConfigSource('path/to.toml'))
+        s5 = Settings.from_dict({'DETECTOR_PREFIX': 'sim'})
+    """
+
+    def __init__(
+        self,
+        locator: Optional[Union[int, str, ConfigSource, Dict[str, Any]]] = None,
+        repo: Optional[ConfigRepository] = None,
+    ) -> None:
+        self.repo = repo or ConfigRepository(DatabaseInterface())
+        self.source_type: Optional[str] = None
+        self.locator: Optional[Union[int, str]] = None
+        self._source: Optional[ConfigSource] = None
+        self.CONFIG: Dict[str, Any] = {}
+        # Absolute project root directory for path resolution
+        self.PROJECT_ROOT: Path = Path(__file__).resolve().parent
+
+        # Public attributes mirroring module-level constants
+        self.BEAMLINE_NAME: Optional[str] = None
+        self.DETECTOR_PREFIX: Optional[str] = None
+        self.OUTPUT_FILE_LOCATION: Optional[str] = None
+        self.CONSUMER_MODE: Optional[str] = None
+
+        self.CACHING_MODE: Optional[str] = None
+        self.CACHE_OPTIONS: Dict[str, Any] = {}
+        self.ALIGNMENT_MAX_CACHE_SIZE: Optional[int] = None
+        self.SCAN_FLAG_PV: Optional[str] = None
+        self.SCAN_START_SCAN: Optional[bool] = None
+        self.SCAN_STOP_SCAN: Optional[bool] = None
+        self.SCAN_THRESHOLD: Optional[float] = None
+        self.SCAN_MAX_CACHE_SIZE: Optional[int] = None
+        self.BIN_COUNT: Optional[int] = None
+        self.BIN_SIZE: Optional[int] = None
+
+        self.METADATA_CA: Dict[str, Any] = {}
+        self.METADATA_PVA: Dict[str, Any] = {}
+        self.ROI: Dict[str, Any] = {}
+        self.STATS: Dict[str, Any] = {}
+        self.HKL: Dict[str, Any] = {}
+        self.ANALYSIS: Dict[str, Any] = {}
+
+        # Initialize
+        if locator is None:
+            self.locator = _get_effective_locator()
+        else:
+            self.set_locator(locator)
+        self.reload()
+
+    # Convenience constructors
+    @classmethod
+    def from_locator(cls, locator: Union[int, str]) -> "Settings":
+        return cls(locator=locator)
+
+    @classmethod
+    def from_toml(cls, path: str) -> "Settings":
+        return cls(locator=path)
+
+    @classmethod
+    def from_profile_id(cls, profile_id: int) -> "Settings":
+        return cls(locator=profile_id)
+
+    @classmethod
+    def from_profile_name(cls, name: str) -> "Settings":
+        return cls(locator=f"profile:{name}")
+
+    @classmethod
+    def from_source(cls, source: ConfigSource) -> "Settings":
+        return cls(locator=source)
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "Settings":
+        return cls(locator=DictConfigSource(data))
+
+    def set_locator(self, locator: Union[int, str, ConfigSource, Dict[str, Any]]) -> None:
+        """
+        Accepts int (profile id), str (TOML path or "profile:<name>"), a ConfigSource-like object, or dict.
+        """
+        if isinstance(locator, dict):
+            self._source = DictConfigSource(locator)
+            self.locator = None
+        elif hasattr(locator, "load") and hasattr(locator, "save"):
+            # Treat as a ConfigSource-like object
+            self._source = locator  # type: ignore[assignment]
+            self.locator = None
+        else:
+            self.locator = locator  # type: ignore[assignment]
+            self._source = None
+
+    def _resolve_source(self) -> Optional[ConfigSource]:
+        if self._source is not None:
+            return self._source
+        return self.repo.resolve(self.locator)
+
+    def ensure_path(self) -> Optional[str]:
+        """
+        Return a TOML path for the current source.
+        - TOML: original path
+        - DB: temp TOML path
+        - dict/other: writes temp TOML and returns its path
+        """
+        src = self._resolve_source()
+        if src is None:
+            return None
+
+        # TomlConfigSource exposes .path
+        if getattr(src, "source_type", None) == "toml" and hasattr(src, "path"):
+            return getattr(src, "path")
+
+        # For DB or dict, create a temp TOML
+        try:
+            data = src.load()
+            fd, tmp = tempfile.mkstemp(suffix=".toml", prefix="dashpva_")
+            os.close(fd)
+            with open(tmp, "w") as f:
+                toml.dump(data, f)
+            return tmp
+        except Exception:
+            return None
+
+    def reload(self) -> None:
+        """
+        Load and parse configuration into object attributes.
+        """
+        src = self._resolve_source()
+        self.source_type = getattr(src, 'source_type', None) if src else None
+        cfg: Dict[str, Any] = {}
+        try:
+            cfg = src.load() if src else {}
+        except Exception:
+            cfg = {}
+        self.CONFIG = cfg
+
+        # Core
+        self.PROJECT_ROOT = PROJECT_ROOT
+        self.DETECTOR_PREFIX = cfg.get('DETECTOR_PREFIX')
+        self.OUTPUT_FILE_LOCATION = cfg.get('OUTPUT_FILE_LOCATION')
+        self.CONSUMER_MODE = cfg.get('CONSUMER_MODE')
+
+        # Cache and convenience
+        self.CACHE_OPTIONS = cfg.get('CACHE_OPTIONS', {}) or {}
+        self.CACHING_MODE = self.CACHE_OPTIONS.get('CACHING_MODE')
+
+        # ALIGNMENT
+        self.ALIGNMENT_MAX_CACHE_SIZE = None
+        try:
+            self.ALIGNMENT_MAX_CACHE_SIZE = int(self.CACHE_OPTIONS.get('ALIGNMENT', {}).get('MAX_CACHE_SIZE'))
+        except Exception:
+            pass
+
+        # SCAN
+        scan = self.CACHE_OPTIONS.get('SCAN', {}) or {}
+        self.SCAN_FLAG_PV = (cfg.get('METADATA', {}).get('CA', {}) or {}).get('FLAG_PV')
+        try:
+            self.SCAN_START_SCAN = bool(scan.get('START_SCAN')) if scan.get('START_SCAN') is not None else None
+        except Exception:
+            self.SCAN_START_SCAN = None
+        try:
+            self.SCAN_STOP_SCAN = bool(scan.get('STOP_SCAN')) if scan.get('STOP_SCAN') is not None else None
+        except Exception:
+            self.SCAN_STOP_SCAN = None
+        try:
+            self.SCAN_THRESHOLD = float(scan.get('THRESHOLD')) if scan.get('THRESHOLD') is not None else None
+        except Exception:
+            self.SCAN_THRESHOLD = None
+        try:
+            self.SCAN_MAX_CACHE_SIZE = int(scan.get('MAX_CACHE_SIZE')) if scan.get('MAX_CACHE_SIZE') is not None else None
+        except Exception:
+            self.SCAN_MAX_CACHE_SIZE = None
+
+        # BIN
+        bin_opts = self.CACHE_OPTIONS.get('BIN', {}) or {}
+        try:
+            self.BIN_COUNT = int(bin_opts.get('COUNT')) if bin_opts.get('COUNT') is not None else None
+        except Exception:
+            self.BIN_COUNT = None
+        try:
+            self.BIN_SIZE = int(bin_opts.get('SIZE')) if bin_opts.get('SIZE') is not None else None
+        except Exception:
+            self.BIN_SIZE = None
+
+        # Sections
+        metadata = cfg.get('METADATA', {}) or {}
+        self.METADATA_CA = metadata.get('CA', {}) or {}
+        self.METADATA_PVA = metadata.get('PVA', {}) or {}
+
+        self.ROI = cfg.get('ROI', {}) or {}
+        self.STATS = cfg.get('STATS', {}) or {}
+        self.HKL = cfg.get('HKL', {}) or {}
+        self.ANALYSIS = cfg.get('ANALYSIS', {}) or {}
+
+    @property
+    def db(self) -> DatabaseInterface:
+        """Expose the underlying DatabaseInterface so callers can use database settings APIs."""
+        return self.repo.db
+
+
+# Export a default object instance using the same precedence as the module-level globals
+SETTINGS = Settings()
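+
+# Example usage (illustrative; the TOML path and profile name are placeholders):
+#
+#   from settings import Settings, SETTINGS
+#
+#   s = Settings.from_toml('pv_configs/sample_config.toml')
+#   print(s.DETECTOR_PREFIX, s.CACHING_MODE)
+#
+#   # or use the module-level instance resolved at import time
+#   print(SETTINGS.CONFIG.get('CONSUMER_MODE'))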
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..f43e944
--- /dev/null
+++ b/utils/__init__.py
@@ -0,0 +1,39 @@
+try:
+    from utils.hdf5_writer import HDF5Writer
+except Exception:
+    pass
+
+try:
+    from utils.hdf5_loader import HDF5Loader
+except Exception:
+    pass
+
+try:
+    from utils.hdf5_handler import HDF5Handler
+except Exception:
+    pass
+
+try:
+    from utils.pva_reader import PVAReader
+except Exception:
+    pass
+
+try:
+    from utils.size_manager import SizeManager
+except Exception:
+    pass
+
+try:
+    from utils.generators import rotation_cycle
+except Exception:
+    pass
+
+try:
+    from utils.dash_analysis import DashAnalysis
+except Exception:
+    pass
+
+try:
+    from utils.rsm_converter import RSMConverter
+except Exception:
+    pass
\ No newline at end of file
diff --git a/utils/dash_analysis.py b/utils/dash_analysis.py
new file mode 100644
index 0000000..fd2e9c4
--- /dev/null
+++ b/utils/dash_analysis.py
@@ -0,0 +1,2549 @@
+"""
+Dash Analysis Module
+
+Object-oriented analysis tools for HKL data processing, including slice extraction,
+data manipulation, and visualization utilities for DashPVA.
+
+This module provides comprehensive tools for loading, processing, and analyzing
+HKL crystallographic data, including slice extraction, coordinate transformations,
+and data filtering operations.
+"""
+
+import argparse
+import os
+from typing import Optional, Tuple, Union, List, Dict, Any
+import numpy as np
+
+# We try to keep PyVista optional; only import when building a grid or plotting
+try:
+    import pyvista as pv
+except Exception:
+    pv = None
+
+try:
+    import sys
+    import os
+    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+    from utils.hdf5_loader import HDF5Loader
+except Exception:
+    try:
+        from hdf5_loader import HDF5Loader
+    except Exception:
+        HDF5Loader = None
+
+# Fallback reader using h5py for simple cases if HDF5Loader is unavailable or fails
+try:
+    import h5py
+except Exception:
+    h5py = None
+
+# Optional Matplotlib for image plotting
+try:
+    import matplotlib.pyplot as plt
+except Exception:
+    plt = None
+
+
+# ============================================================================
+# CLASSES
+# ============================================================================
+
+class Data:
+    """
+    Container class for 3D point data and intensities.
+
+    This class encapsulates point cloud data along with intensity values
+    for HKL crystallographic analysis.
+
+    Attributes:
+        points (np.ndarray): 3D point coordinates with shape (N, 3)
+        intensities (np.ndarray): Intensity values with shape (N,)
+        metadata (dict): Optional metadata describing the source file/datasets
+        num_images (int): Number of source images contributing to the data
+        shape (tuple): Optional shape of the originating dataset
+    """
+
+    def __init__(self, points: np.ndarray, intensities: np.ndarray, metadata: dict=None, num_images: int=0, shape: tuple=None):
+        """
+        Initialize Data object.
+
+        Args:
+            points: 3D point coordinates with shape (N, 3)
+            intensities: Intensity values with shape (N,)
+            metadata: Optional metadata dictionary
+            num_images: Number of source images (default 0)
+            shape: Optional shape of the originating dataset
+        """
+        self.points = points
+        self.intensities = intensities
+        self.metadata = metadata
+        self.num_images = num_images
+        self.shape = shape
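+
+# Example (sketch; `points`/`intensities` are placeholder arrays, e.g. obtained
+# via HDF5Loader.load_h5_to_3d, which yields points + intensities):
+#
+#   d = Data(points, intensities, metadata={'source': 'dummy.h5'}, num_images=1)
+#   print(d.points.shape, d.intensities.shape)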
+
+
+class LineCutData(Data):
+    """
+    Container class for line cut analysis results.
+
+    This class stores the results of line cut operations on slice data,
+    including distance profiles, intensity values, and coordinate information.
+
+    Attributes:
+        distance (np.ndarray): Distance values along the line cut
+        intensity (np.ndarray): Intensity values along the line cut
+        H (np.ndarray): H coordinate values along the line cut
+        K (np.ndarray): K coordinate values along the line cut
+        endpoints (tuple): Start and end points of the line cut
+        orientation (str): Orientation of the slice ('HK', 'KL', 'HL', etc.)
+    """
+
+    def __init__(self, distance: np.ndarray, intensity: np.ndarray,
+                 H: np.ndarray, K: np.ndarray, endpoints: tuple, orientation: str):
+        """
+        Initialize LineCutData object.
+
+        Args:
+            distance: Distance values along the line cut
+            intensity: Intensity values along the line cut
+            H: H coordinate values along the line cut
+            K: K coordinate values along the line cut
+            endpoints: Start and end points of the line cut
+            orientation: Orientation of the slice
+        """
+        self.distance = distance
+        self.intensity = intensity
+        self.H = H
+        self.K = K
+        self.endpoints = endpoints
+        self.orientation = orientation
+
+    def get_peak(self):
+        """
+        Identify peak positions in the line cut data.
+
+        Returns:
+            Peak analysis results (to be implemented)
+        """
+        pass
+
+
+class SliceData:
+    """
+    Container class for slice data and metadata.
+
+    This class encapsulates slice data along with orientation information
+    and provides methods for data manipulation and access.
+
+    Attributes:
+        data (Data): The slice data object
+        orientation: Orientation identifier for the slice
+        shape (tuple): Resolution of the slice raster
+    """
+
+    def __init__(self, data=None, orientation=0, shape=(512, 512)):
+        """Initialize SliceData object, honoring the provided arguments."""
+        # Fall back to an empty Data container when no data is supplied
+        self.data = data if data is not None else Data(np.array([]), np.array([]))
+        self.orientation = orientation
+        self.shape = shape
+
+
+class DashAnalysis:
+    """
+    Main analysis class for DashPVA HKL data processing.
+
+    This class provides comprehensive tools for loading, processing, and analyzing
+    HKL crystallographic data, including slice extraction, coordinate transformations,
+    volume creation, and visualization utilities.
+
+    The class is designed to be Jupyter-friendly and supports both volume and
+    point cloud data formats with PyVista integration for 3D visualization.
+
+    Attributes:
+        _last_image (np.ndarray): Cached raster image from last slice operation
+        _last_extent (list): Cached extent [U_min, U_max, V_min, V_max] from last slice
+        _last_orientation (str): Cached orientation from last slice operation
+
+    Usage:
+        # Basic usage
+        da = DashAnalysis()
+        data = da.load_data('/path/to/file.h5')
+
+        # Create and display volume
+        vol = da.create_vol(data.points, data.intensities)
+        da.show_vol(vol)
+
+        # Create and analyze slices
+        slice_mesh = da.slice_data(data, hkl='HK')
+        da.show_slice(slice_mesh)
+
+        # Perform line cuts
+        line_data = da.line_cut('zero', param=(1.0, 'x'), vol=slice_mesh)
+    """
+
+    def __init__(self):
+        """
+        Initialize DashAnalysis with empty caches.
+
+        Caches are used to store the last raster image and extent/orientation
+        for efficient line cut operations without requiring volume regeneration.
+        """
+        # caches for last raster image and extent/orientation for line cuts
+        self._last_image = None
+        self._last_extent = None  # [U_min, U_max, V_min, V_max]
+        self._last_orientation = None
+
+
+# ============================================================================
+# MAIN METHODS (Ordered by complexity/length - longest to shortest)
+# ============================================================================
+
+    def slice_data(self, data, hkl=None, normal=None, shape=(512,512), slab_thickness=None,
+                   clamp_to_bounds=True, spacing=(0.5, 0.5, 0.5), grid_origin=(0.0, 0.0, 0.0),
+                   show=True, axes=None, intensity_range=None, **kwargs):
+        """
+        Create a slice from either a volume or a point cloud with advanced processing options.
+
+        This is the most comprehensive slice extraction method, supporting both volume and
+        point cloud data with customizable HKL axes, integration parameters, and visualization
+        options. Handles coordinate transformations, adaptive interpolation, and metadata preservation.
+ + Usage: + # Traditional usage with presets + slice_data = da.slice_data(data, hkl='HK') + + # Mixed HKL axes with custom orientation + slice_data = da.slice_data(data, axes=((0.5, 0, 0), (0, 1, 0))) + + # Point cloud slicing with slab thickness + slice_data = da.slice_data(point_data, hkl='KL', slab_thickness=0.2) + + Parameters: + data: Volume or point cloud data. Supported formats: + - Volume: pv.ImageData, np.ndarray (D,H,W), or (ndarray_volume, shape_tuple) + - Points: (points, intensities) where points is (N,3) and intensities is (N,) + - Data object with .points and .intensities attributes + - Dict with 'points' and 'intensities' keys + hkl: Slice origin or orientation preset. Options: + - 3-vector: Used directly as slice origin coordinates + - String preset: 'HK'/'XY', 'KL'/'YZ', 'HL'/'XZ' sets normal and uses dataset center + normal (array-like): Slice plane normal vector (3,). Overridden by hkl presets + shape (tuple): Resolution (rows, cols) for sampling the plane when slicing points + slab_thickness (float): Thickness of selection slab around plane for point slicing + clamp_to_bounds (bool): Clamp origin to dataset bounds + spacing (tuple): Voxel spacing (ΔH, ΔK, ΔL) for grid construction from NumPy volumes + grid_origin (tuple): Grid origin (H0, K0, L0) for grid construction from NumPy volumes + show (bool): If True, displays the slice using show_slice + axes: Optional HKL axis specification. Formats: + - ((u_hkl, v_hkl),): Two 3-vectors defining in-plane axes in HKL coordinates + - ((u_hkl, v_hkl), n_hkl): Two in-plane axes plus normal vector + - None: Use hkl/normal parameters as before + intensity_range (tuple): Optional (min, max) intensity bounds used to filter + contributing data prior to slicing/interpolation. Use None for open bounds, + e.g., (None, 500) or (100, None). Default None applies no filtering (full + intensity range). + **kwargs: Additional keyword arguments passed to show_slice when show=True. 
+ Common options include: + - axis_display: 'hkl' (default) or 'uv' for axis label format + - cmap: Colormap name for display + - clim: (vmin, vmax) intensity display limits + + Returns: + pv.PolyData: Slice mesh with field_data containing: + - 'slice_normal': Normal vector of the slice plane + - 'slice_origin': Origin point of the slice plane + - 'slice_u_axis': U-axis vector in HKL coordinates (if custom axes used) + - 'slice_v_axis': V-axis vector in HKL coordinates (if custom axes used) + - 'slice_u_label': Formatted U-axis label (e.g., "H + K") + - 'slice_v_label': Formatted V-axis label (e.g., "L/2") + + Raises: + ImportError: If PyVista is not available + TypeError: If data format is not supported + ValueError: If slice parameters are invalid + + Examples: + # Extract HK plane slice from volume + hk_slice = da.slice_data(volume_data, hkl='HK') + + # Extract custom orientation slice from point cloud + custom_slice = da.slice_data( + point_data, + axes=((1, 1, 0), (0, 0, 1)), + slab_thickness=0.1 + ) + + # High-resolution slice with specific spacing + hr_slice = da.slice_data( + data, hkl=(1.0, 0.5, 0.0), + shape=(1024, 1024), + spacing=(0.1, 0.1, 0.1) + ) + + # Slice with UV axis labels + uv_slice = da.slice_data( + data, hkl='HK', + axis_display='uv' + ) + """ + if pv is None: + raise ImportError("PyVista is required for slice_data()") + + import numpy as _np + + # Helper: normalize volume to pv.ImageData with cell_data['intensity'] + def _ensure_grid(_vol, _spacing=(1.0, 1.0, 1.0), _origin=(0.0, 0.0, 0.0)): + if isinstance(_vol, pv.ImageData): + _grid = _vol + if ('intensity' not in _grid.cell_data) and ('intensity' in _grid.point_data): + _grid = _grid.point_data_to_cell_data(pass_point_data=False) + if 'intensity' not in _grid.cell_data: + raise ValueError("ImageData must have cell_data['intensity'] for slicing.") + return _grid + if isinstance(_vol, (tuple, list)) and len(_vol) >= 1 and isinstance(_vol[0], _np.ndarray): + _vol_np = _vol[0] + elif isinstance(_vol, _np.ndarray): + _vol_np = _vol + else: + raise TypeError("Volume must be pv.ImageData, a NumPy ndarray (D,H,W), or (ndarray_volume, shape) tuple.") + if _vol_np.ndim != 3: + raise ValueError("NumPy volume must be 3D shaped (D,H,W).") + _dims_cells = _np.array(_vol_np.shape, dtype=int) + _grid = pv.ImageData() + _grid.dimensions = (_dims_cells + 1).tolist() + _grid.spacing = tuple(float(x) for x in _spacing) + _grid.origin = tuple(float(x) for x in _origin) + _grid.cell_data['intensity'] = _np.asarray(_vol_np, dtype=_np.float32).flatten(order='F') + return _grid + + # Helper: resolve normal and origin + def _resolve_plane(_dataset_center, _bounds): + # Resolve normal (from hkl preset or provided normal) + _n = None + if isinstance(hkl, str): + s = hkl.strip().lower() + if s in ('hk', 'xy'): + _n = _np.array([0.0, 0.0, 1.0], dtype=float) + elif s in ('kl', 'yz'): + _n = _np.array([1.0, 0.0, 0.0], dtype=float) + elif s in ('hl', 'xz'): + _n = _np.array([0.0, 1.0, 0.0], dtype=float) + if _n is None: + _n = _np.array(normal if (normal is not None) else [0.0, 0.0, 1.0], dtype=float) + # Normalize + nlen = float(_np.linalg.norm(_n)) + if not _np.isfinite(nlen) or nlen <= 0.0: + _n = _np.array([0.0, 0.0, 1.0], dtype=float) + else: + _n = _n / nlen + + # Resolve origin + if isinstance(hkl, (tuple, list, _np.ndarray)) and len(hkl) == 3: + _o = _np.array([float(hkl[0]), float(hkl[1]), float(hkl[2])], dtype=float) + else: + _o = _np.array(_dataset_center if _dataset_center is not None else [0.0, 0.0, 0.0], dtype=float) + + # Clamp 
origin to bounds + if clamp_to_bounds and (_bounds is not None) and (len(_bounds) == 6): + _o[0] = float(_np.clip(_o[0], _bounds[0], _bounds[1])) + _o[1] = float(_np.clip(_o[1], _bounds[2], _bounds[3])) + _o[2] = float(_np.clip(_o[2], _bounds[4], _bounds[5])) + + return _o, _n + + # Distinguish volume vs points + is_volume_like = isinstance(data, pv.ImageData) or isinstance(data, np.ndarray) or (isinstance(data, (tuple, list)) and len(data) >= 1 and isinstance(data[0], np.ndarray) and data[0].ndim == 3) + # If volume return the slice + if is_volume_like: + grid = _ensure_grid(data, _spacing=spacing, _origin=grid_origin) + # Apply intensity range filter if provided (volume data) + if intensity_range is not None: + try: + if isinstance(intensity_range, (tuple, list)) and len(intensity_range) == 2: + _imin = None if intensity_range[0] is None else float(intensity_range[0]) + _imax = None if intensity_range[1] is None else float(intensity_range[1]) + else: + raise ValueError("intensity_range must be a (min, max) tuple") + + _arr = np.asarray(grid.cell_data['intensity']).astype(np.float32) + _mask = np.ones(_arr.shape, dtype=bool) + if _imin is not None: + _mask &= (_arr >= _imin) + if _imax is not None: + _mask &= (_arr <= _imax) + if not np.any(_mask): + import warnings as _warnings + _warnings.warn("slice_data: intensity_range excluded all voxels; leaving volume unfiltered.") + else: + _arr[~_mask] = 0.0 + grid.cell_data['intensity'] = _arr + except Exception: + # Be permissive; if anything goes wrong with filtering, continue unfiltered + pass + center = getattr(grid, 'center', (0.0, 0.0, 0.0)) + origin_vec, n_vec = _resolve_plane(center, getattr(grid, 'bounds', None)) + sl = grid.slice(normal=n_vec, origin=origin_vec) + sl.field_data['slice_normal'] = np.asarray(n_vec, dtype=float) + sl.field_data['slice_origin'] = np.asarray(origin_vec, dtype=float) + # Attach constant unit normals matching the slice normal + try: + normals_point = np.tile(np.asarray(n_vec, dtype=np.float32), (sl.n_points, 1)) + sl.point_data['Normals'] = normals_point + try: + sl.point_data.set_active_normals('Normals') + except Exception: + try: + sl.set_active_vectors('Normals') + except Exception: + pass + if sl.n_cells > 0: + normals_cell = np.tile(np.asarray(n_vec, dtype=np.float32), (sl.n_cells, 1)) + sl.cell_data['Normals'] = normals_cell + except Exception: + pass + # Store a display clim on the slice similar to 3D viewer behavior + try: + vals = _np.asarray(sl['intensity'], dtype=float).reshape(-1) + except Exception: + vals = None + disp_min = None + disp_max = None + try: + if isinstance(intensity_range, (tuple, list)) and len(intensity_range) == 2: + imin, imax = intensity_range + disp_min = (float(imin) if (imin is not None) else (float(_np.nanmin(vals)) if (vals is not None and vals.size > 0) else None)) + disp_max = (float(imax) if (imax is not None) else (float(_np.nanmax(vals)) if (vals is not None and vals.size > 0) else None)) + else: + if vals is not None and vals.size > 0: + disp_min = float(_np.nanmin(vals)) + disp_max = float(_np.nanmax(vals)) + except Exception: + disp_min = disp_min if disp_min is not None else None + disp_max = disp_max if disp_max is not None else None + try: + if (disp_min is not None) and (disp_max is not None) and _np.isfinite(disp_min) and _np.isfinite(disp_max): + sl.field_data['slice_intensity_clim'] = _np.asarray([disp_min, disp_max], dtype=float) + except Exception: + pass + # call show slice if requested + if show: + self.show_slice(sl, shape=shape, **kwargs) + return 
sl + + # Treat as point cloud + # Extract points and intensities + points = None + intensities = None + # if data is input as data=(points,intensities) + if isinstance(data, (tuple, list)) and len(data) >= 2: + points = np.asarray(data[0], dtype=float) + intensities = np.asarray(data[1], dtype=float).reshape(-1) + # if input is input as data + elif hasattr(data, 'points') and hasattr(data, 'intensities'): + points = np.asarray(getattr(data, 'points'), dtype=float) + intensities = np.asarray(getattr(data, 'intensities'), dtype=float).reshape(-1) + elif isinstance(data, dict): + points = np.asarray(data.get('points'), dtype=float) + intensities = np.asarray(data.get('intensities'), dtype=float).reshape(-1) + else: + raise TypeError("Point data must be provided as (points, intensities) tuple/list, object with .points/.intensities, or {'points': ..., 'intensities': ...} dict.") + + if points is None or intensities is None or points.ndim != 2 or points.shape[1] != 3 or intensities.shape[0] != points.shape[0]: + raise ValueError("Invalid point data: points must be (N,3) and intensities must be length N.") + + # Build cloud and bounds + cloud = pv.PolyData(points) + cloud['intensity'] = intensities.astype('float32') + + minb = points.min(axis=0) + maxb = points.max(axis=0) + bounds = (float(minb[0]), float(maxb[0]), float(minb[1]), float(maxb[1]), float(minb[2]), float(maxb[2])) + center = ((minb + maxb) * 0.5).astype(float) + + origin_vec, n_vec = _resolve_plane(center, bounds) + + # Optional pre-filter: limit contributing points to a slab around the plane for interpolation + rel = points - origin_vec[None, :] + d_signed = rel.dot(n_vec) + use_slab = slab_thickness is not None and np.isfinite(float(slab_thickness)) and float(slab_thickness) > 0.0 + if use_slab: + tol = float(slab_thickness) + mask_slab = np.abs(d_signed) <= tol + # Fallback to all points if slab yields none + if not np.any(mask_slab): + mask_slab = np.ones(points.shape[0], dtype=bool) + else: + mask_slab = np.ones(points.shape[0], dtype=bool) + + # Optional intensity range filter + if intensity_range is not None and isinstance(intensity_range, (tuple, list)) and len(intensity_range) == 2: + try: + _imin = None if intensity_range[0] is None else float(intensity_range[0]) + _imax = None if intensity_range[1] is None else float(intensity_range[1]) + except Exception: + _imin = None; _imax = None + mask_int = np.ones(intensities.shape[0], dtype=bool) + if _imin is not None: + mask_int &= (intensities >= _imin) + if _imax is not None: + mask_int &= (intensities <= _imax) + else: + mask_int = np.ones(intensities.shape[0], dtype=bool) + + mask_contrib = mask_slab & mask_int + + # For extent estimation, prefer slab mask (geometry) even if intensity filter removes all + if np.any(mask_slab): + pts_for_extent = points[mask_slab] + vals_for_extent = intensities[mask_slab] + else: + pts_for_extent = points + vals_for_extent = intensities + + # Resolve HKL axes or build default in-plane basis + u_hkl = None + v_hkl = None + n_hkl = None + + if axes is not None: + # Parse axes parameter: ((u_hkl, v_hkl),) or ((u_hkl, v_hkl), n_hkl) + if isinstance(axes, (tuple, list)) and len(axes) >= 2: + u_hkl = _np.asarray(axes[0], dtype=float) + v_hkl = _np.asarray(axes[1], dtype=float) + if len(axes) >= 3: + n_hkl = _np.asarray(axes[2], dtype=float) + # Normalize provided normal + n_len = float(_np.linalg.norm(n_hkl)) + if _np.isfinite(n_len) and n_len > 0.0: + n_hkl = n_hkl / n_len + n_vec = n_hkl # Override computed normal + else: + # Compute normal 
from u_hkl × v_hkl + n_computed = _np.cross(u_hkl, v_hkl) + n_len = float(_np.linalg.norm(n_computed)) + if _np.isfinite(n_len) and n_len > 0.0: + n_hkl = n_computed / n_len + n_vec = n_hkl # Override computed normal + + if u_hkl is not None and v_hkl is not None: + # Use provided HKL axes directly (preserving scale) + u = u_hkl + v = v_hkl + else: + # Build default orthonormal in-plane basis from normal + world_axes = [ + np.array([1.0, 0.0, 0.0], dtype=float), + np.array([0.0, 1.0, 0.0], dtype=float), + np.array([0.0, 0.0, 1.0], dtype=float), + ] + ref = world_axes[0] + for ax in world_axes: + if abs(float(np.dot(ax, n_vec))) < 0.9: + ref = ax + break + u = np.cross(n_vec, ref) + u_len = float(np.linalg.norm(u)) + if not np.isfinite(u_len) or u_len <= 0.0: + ref = np.array([0.0, 1.0, 0.0], dtype=float) + u = np.cross(n_vec, ref) + u_len = float(np.linalg.norm(u)) + if not np.isfinite(u_len) or u_len <= 0.0: + u = np.array([1.0, 0.0, 0.0], dtype=float) + u_len = 1.0 + u = u / u_len + v = np.cross(n_vec, u) + v_len = float(np.linalg.norm(v)) + if not np.isfinite(v_len) or v_len <= 0.0: + v = np.array([0.0, 1.0, 0.0], dtype=float) + else: + v = v / v_len + + # Project points used for extent, compute extents + rel_ext = pts_for_extent - origin_vec[None, :] + U = rel_ext.dot(u) + V = rel_ext.dot(v) + U_min, U_max = float(np.min(U)), float(np.max(U)) + V_min, V_max = float(np.min(V)), float(np.max(V)) + if not np.isfinite(U_min) or not np.isfinite(U_max) or U_max == U_min: + U_min, U_max = -0.5, 0.5 + if not np.isfinite(V_min) or not np.isfinite(V_max) or V_max == V_min: + V_min, V_max = -0.5, 0.5 + + # Slight padding + pad_u = (U_max - U_min) * 0.02 + pad_v = (V_max - V_min) * 0.02 + U_min -= pad_u + U_max += pad_u + V_min -= pad_v + V_max += pad_v + + i_size = max(U_max - U_min, 1e-6) + j_size = max(V_max - V_min, 1e-6) + H, W = ((int(shape[0]), int(shape[1])) if (isinstance(shape, (tuple, list)) and len(shape) == 2) else tuple(getattr(data, 'metadata')['datasets']['/entry/data/data']['shape'][-2:])) + H = max(int(H), 2) + W = max(int(W), 2) + + # Create plane sized by extents and interpolate point data onto it + # ORIGINAL (kept for reference): + # plane = pv.Plane(center=origin_vec.tolist(), direction=n_vec.tolist(), + # i_size=i_size, j_size=j_size, i_resolution=W, j_resolution=H) + # Use W-1/H-1 so plane.n_points == H*W; reduces work and aligns with stored slice_shape + plane = pv.Plane(center=origin_vec.tolist(), direction=n_vec.tolist(), + i_size=i_size, j_size=j_size, i_resolution=W-1, j_resolution=H-1) + + # Choose contributing cloud based on slab and intensity range + if np.any(mask_contrib): + cloud_contrib = pv.PolyData(points[mask_contrib]) + cloud_contrib['intensity'] = intensities[mask_contrib].astype('float32') + no_contrib = False + else: + no_contrib = True + + # Use smart radius calculation to minimize gaps + optimal_radius = self._calculate_smart_radius( + pts_for_extent, + (U_min, U_max), + (V_min, V_max), + (H, W) + ) + + if not no_contrib: + interp_plane = plane.interpolate( + cloud_contrib, + radius=optimal_radius, + sharpness=1.5, + null_value=0.0 + ) + else: + # No contributing points: return a zero-intensity plane + interp_plane = plane.copy() + try: + interp_plane['intensity'] = np.zeros(interp_plane.n_points, dtype=np.float32) + except Exception: + pass + import warnings as _warnings + _warnings.warn("slice_data: intensity_range and/or slab_thickness excluded all points; returning empty slice.") + interp_plane.field_data['slice_normal'] = np.asarray(n_vec, 
dtype=float) + interp_plane.field_data['slice_origin'] = np.asarray(origin_vec, dtype=float) + # Attach constant unit normals matching the slice normal + try: + normals_point = np.tile(np.asarray(n_vec, dtype=np.float32), (interp_plane.n_points, 1)) + interp_plane.point_data['Normals'] = normals_point + try: + interp_plane.point_data.set_active_normals('Normals') + except Exception: + try: + interp_plane.set_active_vectors('Normals') + except Exception: + pass + if interp_plane.n_cells > 0: + normals_cell = np.tile(np.asarray(n_vec, dtype=np.float32), (interp_plane.n_cells, 1)) + interp_plane.cell_data['Normals'] = normals_cell + except Exception: + pass + # Store a display clim on the slice similar to 3D viewer behavior + try: + vals = _np.asarray(interp_plane['intensity'], dtype=float).reshape(-1) + except Exception: + vals = None + disp_min = None + disp_max = None + try: + if isinstance(intensity_range, (tuple, list)) and len(intensity_range) == 2: + imin, imax = intensity_range + disp_min = (float(imin) if (imin is not None) else (float(_np.nanmin(vals)) if (vals is not None and vals.size > 0) else None)) + disp_max = (float(imax) if (imax is not None) else (float(_np.nanmax(vals)) if (vals is not None and vals.size > 0) else None)) + else: + if vals is not None and vals.size > 0: + disp_min = float(_np.nanmin(vals)) + disp_max = float(_np.nanmax(vals)) + except Exception: + disp_min = disp_min if disp_min is not None else None + disp_max = disp_max if disp_max is not None else None + try: + if (disp_min is not None) and (disp_max is not None) and _np.isfinite(disp_min) and _np.isfinite(disp_max): + interp_plane.field_data['slice_intensity_clim'] = _np.asarray([disp_min, disp_max], dtype=float) + except Exception: + pass + + # Store HKL axes for downstream use + interp_plane.field_data['slice_u_axis'] = np.asarray(u, dtype=float) + interp_plane.field_data['slice_v_axis'] = np.asarray(v, dtype=float) + # Persist the slice resolution so downstream display/analysis can honor it + interp_plane.field_data['slice_shape'] = _np.asarray([H, W], dtype=int) + + # Store HKL axis labels if available + if u_hkl is not None: + interp_plane.field_data['slice_u_label'] = format_hkl_axis(u_hkl) + if v_hkl is not None: + interp_plane.field_data['slice_v_label'] = format_hkl_axis(v_hkl) + + if show: + self.show_slice(interp_plane, shape=shape, **kwargs) + # sd = SliceData(data=Data()) + return interp_plane + + def line_cut(self, spec, param=None, vol=None, hkl='HK', origin=None, shape=(512, 512), + n_samples=512, width_px=1, show=True, interactive=False): + """ + Compute a line cut on a slice with comprehensive analysis options. + + This method performs line cuts on slice data supporting multiple specification formats, + interactive editing, and comprehensive profile analysis. Supports both endpoint-based + and preset-based line definitions with real-time visualization. + + Usage: + # Horizontal line cut at V=1.0 + line_data = da.line_cut('zero', param=(1.0, 'x'), vol=slice_mesh) + + # Interactive line cut with draggable endpoints + line_data = da.line_cut(((0, 0), (1, 1)), vol=slice_mesh, interactive=True) + + # Diagonal line cut with averaging + line_data = da.line_cut('positive', vol=slice_mesh, width_px=3) + + Parameters: + spec: Line specification. 
Options: + - ((U1,V1),(U2,V2)): Explicit endpoints in physical slice coordinates + - 'zero'/'horizontal': Horizontal line at fixed V value + - 'infinite'/'vertical': Vertical line at fixed U value + - 'positive': Diagonal from (U_min,V_min) to (U_max,V_max) + - 'negative': Diagonal from (U_min,V_max) to (U_max,V_min) + param (tuple): Required for preset lines. Format (value, axis_letter): + - For 'zero': (V_value, 'x') fixes V and traverses U_min→U_max + - For 'infinite': (U_value, 'y') fixes U and traverses V_min→V_max + vol: Optional volume or slice data. Formats: + - pv.PolyData slice mesh + - (img, extent) tuple from show_slice(..., return_image=True) + - Volume data for fresh slice generation + - None: Uses cached last image from previous show_slice call + hkl (str): Orientation preset when generating slice from vol ('HK', 'KL', 'HL') + origin (tuple): Slice origin (H,K,L) when generating slice from vol + shape (tuple): Raster resolution (H, W) when generating slice from vol + n_samples (int): Number of samples along the line cut + width_px (int): Averaging strip width in pixels normal to line (1 = true line) + show (bool): If True, overlays line on slice image and displays 1D profile + interactive (bool): If True, enables draggable endpoints with live updates + + Returns: + dict: Line cut analysis results containing: + - 'distance': np.ndarray of distance values along line + - 'intensity': np.ndarray of intensity values along line + - 'U': np.ndarray of U coordinates along line + - 'V': np.ndarray of V coordinates along line + - 'endpoints': ((U1,V1),(U2,V2)) actual endpoints used + - 'orientation': str orientation of the slice ('HK', 'KL', 'HL', etc.) + + Raises: + ImportError: If PyVista or matplotlib are not available + ValueError: If line specification is invalid or no slice data available + + Examples: + # Horizontal line cut through peak + horizontal = da.line_cut('zero', param=(0.5, 'x'), vol=slice_data) + + # Vertical line cut with wide averaging + vertical = da.line_cut('infinite', param=(1.0, 'y'), width_px=5) + + # Custom endpoints with interactive editing + custom = da.line_cut(((0.2, 0.3), (0.8, 0.7)), interactive=True) + + # Diagonal analysis across full extent + diagonal = da.line_cut('positive', n_samples=1024, show=True) + """ + import numpy as _np + + # Resolve image and extent + H, W = None, None + orientation = None + U_min = U_max = V_min = V_max = None + img = None + + try: + if vol is not None: + # Support passing a pre-rasterized image and its extent (as returned by show_slice(..., return_image=True)) + if isinstance(vol, (tuple, list)) and len(vol) >= 2: + try: + img_candidate = _np.asarray(vol[0]) + ext_candidate = vol[1] + if img_candidate.ndim == 2 and isinstance(ext_candidate, (list, tuple)) and len(ext_candidate) == 4: + img = img_candidate.astype(_np.float32) + U_min, U_max, V_min, V_max = float(ext_candidate[0]), float(ext_candidate[1]), float(ext_candidate[2]), float(ext_candidate[3]) + H, W = img.shape[:2] + orientation = self._last_orientation or "Auto" + # cache for subsequent calls + self._last_image = img + self._last_extent = [U_min, U_max, V_min, V_max] + self._last_orientation = orientation + else: + pass # fall through + except Exception: + pass # fall through + + if img is None: + if pv is None: + raise ImportError("PyVista is required to build a slice from 'vol'.") + + # If vol is already a slice mesh + if isinstance(vol, pv.PolyData): + sl = vol + n_vec = _np.asarray(getattr(sl, 'field_data', {}).get('slice_normal', _np.array([0.0, 0.0, 
1.0], dtype=float)), dtype=float)
+                    o_vec = _np.asarray(getattr(sl, 'field_data', {}).get('slice_origin', _np.asarray(getattr(sl, 'center', (0.0, 0.0, 0.0)), dtype=float)), dtype=float)
+                else:
+                    # If vol looks like a 3D volume, require an explicit slice beforehand.
+                    # Call show_slice(..., return_image=True) and pass (img, extent) to line_cut.
+                    is_3d_volume = isinstance(vol, pv.ImageData) or (isinstance(vol, _np.ndarray) and vol.ndim == 3) or (isinstance(vol, (tuple, list)) and len(vol) >= 1 and isinstance(vol[0], _np.ndarray) and getattr(vol[0], "ndim", None) == 3)
+                    if is_3d_volume:
+                        raise ValueError("line_cut expects slice data. Pass a pv.PolyData slice or (img, extent) from show_slice(..., return_image=True).")
+                    # Otherwise attempt to slice via slice_data, honoring the caller's hkl preset and shape
+                    sl = self.slice_data(vol, hkl=hkl, shape=shape, clamp_to_bounds=True)
+                    n_vec = _np.asarray(getattr(sl, 'field_data', {}).get('slice_normal', _np.array([0.0, 0.0, 1.0], dtype=float)), dtype=float)
+                    o_vec = _np.asarray(getattr(sl, 'field_data', {}).get('slice_origin', _np.asarray(getattr(sl, 'center', (0.0, 0.0, 0.0)), dtype=float)), dtype=float)
+
+                pts = _np.asarray(getattr(sl, 'points', _np.empty((0, 3))), dtype=float)
+                try:
+                    vals = _np.asarray(sl['intensity'], dtype=float).reshape(-1)
+                except Exception:
+                    vals = _np.zeros((pts.shape[0],), dtype=float)
+
+                H = max(int(shape[0] if (isinstance(shape, (tuple, list)) and len(shape) == 2) else 512), 2)
+                W = max(int(shape[1] if (isinstance(shape, (tuple, list)) and len(shape) == 2) else 512), 2)
+
+                def _infer_orientation_and_axes(normal_vec: _np.ndarray):
+                    nn = _np.asarray(normal_vec, dtype=float)
+                    nn_len = float(_np.linalg.norm(nn))
+                    if not _np.isfinite(nn_len) or nn_len <= 0.0:
+                        nn = _np.array([0.0, 0.0, 1.0], dtype=float)
+                    else:
+                        nn = nn / nn_len
+                    X = _np.array([1.0, 0.0, 0.0], dtype=float)  # H
+                    Y = _np.array([0.0, 1.0, 0.0], dtype=float)  # K
+                    Z = _np.array([0.0, 0.0, 1.0], dtype=float)  # L
+                    tol = 0.95
+                    dX = abs(float(_np.dot(nn, X)))
+                    dY = abs(float(_np.dot(nn, Y)))
+                    dZ = abs(float(_np.dot(nn, Z)))
+                    if dZ >= tol:
+                        return "HK", (0, 1)
+                    if dX >= tol:
+                        return "KL", (1, 2)
+                    if dY >= tol:
+                        return "HL", (0, 2)
+                    return "Custom", None
+
+                orientation, uv_idxs = _infer_orientation_and_axes(n_vec)
+
+                if pts.size == 0 or vals.size == 0 or pts.shape[0] != vals.shape[0]:
+                    raise ValueError("Slice contains no valid points to rasterize")
+
+                if uv_idxs is not None:
+                    u_idx, v_idx = uv_idxs
+                    U = pts[:, u_idx].astype(float)
+                    V = pts[:, v_idx].astype(float)
+                    U_min, U_max = float(_np.min(U)), float(_np.max(U))
+                    V_min, V_max = float(_np.min(V)), float(_np.max(V))
+                    if (not _np.isfinite(U_min)) or (not _np.isfinite(U_max)) or (U_max == U_min):
+                        U_min, U_max = -0.5, 0.5
+                    if (not _np.isfinite(V_min)) or (not _np.isfinite(V_max)) or (V_max == V_min):
+                        V_min, V_max = -0.5, 0.5
+                    sum_img, _, _ = _np.histogram2d(V, U, bins=[H, W], range=[[V_min, V_max], [U_min, U_max]], weights=vals)
+                    cnt_img, _, _ = _np.histogram2d(V, U, bins=[H, W], range=[[V_min, V_max], [U_min, U_max]])
+                    with _np.errstate(invalid="ignore", divide="ignore"):
+                        img = _np.zeros_like(sum_img, dtype=_np.float32)
+                        nz = cnt_img > 0
+                        img[nz] = (sum_img[nz] / cnt_img[nz]).astype(_np.float32)
+                        img[~nz] = 0.0
+                else:
+                    # Custom orientation: build in-plane basis from normal and origin
+                    world_axes = [
+                        _np.array([1.0, 0.0, 0.0], dtype=float),
+                        _np.array([0.0, 1.0, 0.0], dtype=float),
+                        _np.array([0.0, 0.0, 1.0], dtype=float),
+                    ]
+                    ref = world_axes[0]
+                    for ax in world_axes:
+                        if
abs(float(_np.dot(ax, n_vec))) < 0.9: + ref = ax + break + u = _np.cross(n_vec, ref) + u_len = float(_np.linalg.norm(u)) + if not _np.isfinite(u_len) or u_len <= 0.0: + ref = _np.array([0.0, 1.0, 0.0], dtype=float) + u = _np.cross(n_vec, ref) + u_len = float(_np.linalg.norm(u)) + if not _np.isfinite(u_len) or u_len <= 0.0: + u = _np.array([1.0, 0.0, 0.0], dtype=float) + u_len = 1.0 + u = u / u_len + v = _np.cross(n_vec, u) + v_len = float(_np.linalg.norm(v)) + if not _np.isfinite(v_len) or v_len <= 0.0: + v = _np.array([0.0, 1.0, 0.0], dtype=float) + else: + v = v / v_len + + rel = _np.asarray(pts - o_vec[None, :], dtype=float) + U = rel.dot(u) + V = rel.dot(v) + + U_min, U_max = float(_np.min(U)), float(_np.max(U)) + V_min, V_max = float(_np.min(V)), float(_np.max(V)) + if not _np.isfinite(U_min) or not _np.isfinite(U_max) or (U_max == U_min): + U_min, U_max = -0.5, 0.5 + if not _np.isfinite(V_min) or not _np.isfinite(V_max) or (V_max == V_min): + V_min, V_max = -0.5, 0.5 + + sum_img, _, _ = _np.histogram2d(V, U, bins=[H, W], range=[[V_min, V_max], [U_min, U_max]], weights=vals) + cnt_img, _, _ = _np.histogram2d(V, U, bins=[H, W], range=[[V_min, V_max], [U_min, U_max]]) + with _np.errstate(invalid="ignore", divide="ignore"): + img = _np.zeros_like(sum_img, dtype=_np.float32) + nz = cnt_img > 0 + img[nz] = (sum_img[nz] / cnt_img[nz]).astype(_np.float32) + img[~nz] = 0.0 + + # cache + self._last_image = img + self._last_extent = [U_min, U_max, V_min, V_max] + self._last_orientation = orientation + else: + img = self._last_image + if img is None or self._last_extent is None: + raise ValueError("No slice image available; provide 'vol' or call show_slice(..., return_image=True) first.") + U_min, U_max, V_min, V_max = self._last_extent + H, W = img.shape[:2] + orientation = self._last_orientation or "Auto" + except Exception as e: + raise + + # Endpoints from spec/preset + def _endpoints_from_spec(_spec, _param): + if isinstance(_spec, (tuple, list)) and len(_spec) == 2: + return tuple(_spec[0]), tuple(_spec[1]) + s = str(_spec).strip().lower() + if s in ("zero", "horizontal", "0"): + if not (_param and len(_param) == 2): + raise ValueError("param=(value,'x') required for 'zero' preset") + val, ax = _param + ax = str(ax).lower() + # V fixed at val; U spans full range + return (U_min, float(val)), (U_max, float(val)) + if s in ("infinite", "vertical", "inf"): + if not (_param and len(_param) == 2): + raise ValueError("param=(value,'y') required for 'infinite' preset") + val, ax = _param + ax = str(ax).lower() + # U fixed at val; V spans full range + return (float(val), V_min), (float(val), V_max) + if s in ("positive", "pos"): + return (U_min, V_min), (U_max, V_max) + if s in ("negative", "neg"): + return (U_min, V_max), (U_max, V_min) + raise ValueError(f"Unknown spec '{_spec}'; pass endpoints ((U1,V1),(U2,V2)) or preset string") + + (U1, V1), (U2, V2) = _endpoints_from_spec(spec, param) + + # Convert endpoints to pixel coords + def _uv_to_pixel(Uv, Vv): + col = (float(Uv) - U_min) / (U_max - U_min if (U_max != U_min) else 1.0) * (W - 1) + row = (float(Vv) - V_min) / (V_max - V_min if (V_max != V_min) else 1.0) * (H - 1) + return col, row + + c1, r1 = _uv_to_pixel(U1, V1) + c2, r2 = _uv_to_pixel(U2, V2) + + # Sampling points along the line in pixel space + n_samples = max(int(n_samples), 2) + ts = _np.linspace(0.0, 1.0, n_samples, dtype=float) + cols = c1 + ts * (c2 - c1) + rows = r1 + ts * (r2 - r1) + + # Bilinear interpolation + def _bilinear(img_arr, cc, rr): + h, w = img_arr.shape[:2] + cc 
= _np.clip(cc, 0.0, w - 1.0)
+            rr = _np.clip(rr, 0.0, h - 1.0)
+            c0 = _np.floor(cc).astype(int)
+            r0 = _np.floor(rr).astype(int)
+            c1i = _np.clip(c0 + 1, 0, w - 1)
+            r1i = _np.clip(r0 + 1, 0, h - 1)
+            dc = cc - c0
+            dr = rr - r0
+            I00 = img_arr[r0, c0]
+            I10 = img_arr[r0, c1i]
+            I01 = img_arr[r1i, c0]
+            I11 = img_arr[r1i, c1i]
+            return (1 - dc) * (1 - dr) * I00 + dc * (1 - dr) * I10 + (1 - dc) * dr * I01 + dc * dr * I11
+
+        # Width averaging across perpendicular offsets
+        width_px = max(int(width_px), 1)
+        if width_px == 1:
+            prof = _bilinear(img, cols, rows)
+        else:
+            dcol = c2 - c1
+            drow = r2 - r1
+            length = float(_np.hypot(dcol, drow))
+            if not _np.isfinite(length) or length <= 0.0:
+                length = 1.0
+            # Perpendicular unit vector in pixel space, (col, row) = (-drow, dcol)
+            u_perp = _np.array([-drow, dcol], dtype=float) / length
+            half = (width_px - 1) / 2.0
+            offsets = _np.linspace(-half, half, width_px, dtype=float)
+            samples = []
+            for off in offsets:
+                # Offset columns by the perpendicular col component and rows by
+                # its row component (the previous index order sampled along the line)
+                cc = cols + off * u_perp[0]
+                rr = rows + off * u_perp[1]
+                samples.append(_bilinear(img, cc, rr))
+            prof = _np.mean(_np.vstack(samples), axis=0)
+
+        # Physical coordinates per sample and distance
+        U_samples = U1 + ts * (U2 - U1)
+        V_samples = V1 + ts * (V2 - V1)
+        dist = _np.sqrt((U_samples - U1) ** 2 + (V_samples - V1) ** 2)
+
+        lc = {
+            "distance": dist.astype(_np.float32),
+            "intensity": _np.asarray(prof, dtype=_np.float32),
+            "U": _np.asarray(U_samples, dtype=_np.float32),
+            "V": _np.asarray(V_samples, dtype=_np.float32),
+            "endpoints": ((float(U1), float(V1)), (float(U2), float(V2))),
+            "orientation": str(orientation),
+        }
+
+        if show and not interactive:
+            # Overlay line and show profile
+            if plt is not None:
+                fig, axes = plt.subplots(1, 2, figsize=(10, 4))
+                ax_img, ax_prof = axes
+                extent = [U_min, U_max, V_min, V_max]
+                ax_img.imshow(img, origin='lower', extent=extent, cmap='viridis', aspect='auto')
+                ax_img.plot([U1, U2], [V1, V2], color='cyan', linewidth=2)
+                ax_img.set_title(f"Slice ({orientation}) with line cut")
+                ax_img.set_xlabel('U'); ax_img.set_ylabel('V')
+                ax_prof.plot(dist, prof, color='magenta')
+                ax_prof.set_xlabel('Distance')
+                ax_prof.set_ylabel('Intensity')
+                ax_prof.set_title("Line cut profile")
+                plt.tight_layout()
+                plt.show()
+
+        # Interactive draggable endpoints with live profile updates
+        if interactive:
+            if plt is None:
+                raise ImportError("matplotlib is required for interactive line_cut")
+            # Check the backend; warn and fall back to a static overlay if non-interactive
+            import matplotlib as _mpl
+            _backend = str(getattr(_mpl, "get_backend", lambda: "")()).lower()
+            if ("inline" in _backend) or ("agg" in _backend):
+                try:
+                    print(f"DashAnalysis.line_cut interactive=True requires an interactive Matplotlib backend. Detected backend: {_mpl.get_backend()}. 
Run '%matplotlib widget' (preferred, requires 'ipympl') or '%matplotlib notebook' in a notebook cell, then retry.") + except Exception: + pass + # Fallback: draw static overlay and return + if show: + fig, axes = plt.subplots(1, 2, figsize=(10, 4)) + ax_img, ax_prof = axes + extent = [U_min, U_max, V_min, V_max] + ax_img.imshow(img, origin='lower', extent=extent, cmap='viridis', aspect='auto') + ax_img.plot([U1, U2], [V1, V2], color='cyan', linewidth=2) + ax_img.set_title(f"Slice ({orientation}) with line cut (static, non-interactive backend)") + ax_img.set_xlabel('U'); ax_img.set_ylabel('V') + ax_prof.plot(dist, prof, color='magenta') + ax_prof.set_xlabel('Distance') + ax_prof.set_ylabel('Intensity') + ax_prof.set_title("Line cut profile") + plt.tight_layout() + plt.show() + return lc + try: + from matplotlib.lines import Line2D + except Exception: + Line2D = None + + extent = [U_min, U_max, V_min, V_max] + fig, (ax_img, ax_prof) = plt.subplots(1, 2, figsize=(10, 4)) + im = ax_img.imshow(img, origin='lower', extent=extent, cmap='viridis', aspect='auto') + ax_img.set_title(f"Slice ({orientation}) — drag endpoints") + # initial endpoints from spec + p1 = [float(U1), float(V1)] + p2 = [float(U2), float(V2)] + + # line + endpoint markers + if Line2D is not None: + line = Line2D([p1[0], p2[0]], [p1[1], p2[1]], color='cyan', lw=2) + ax_img.add_line(line) + else: + line_plot, = ax_img.plot([p1[0], p2[0]], [p1[1], p2[1]], color='cyan', lw=2) + pt1 = ax_img.plot(p1[0], p1[1], 'o', color='cyan', ms=8, picker=5)[0] + pt2 = ax_img.plot(p2[0], p2[1], 'o', color='cyan', ms=8, picker=5)[0] + + # initial profile + prof_line, = ax_prof.plot(dist, prof, color='magenta') + ax_prof.set_xlabel('Distance'); ax_prof.set_ylabel('Intensity') + ax_prof.set_title('Line cut profile') + + state = {"drag": None} + + def update_profile(): + # recompute with current endpoints using cached image+extent + lc_local = self.line_cut((tuple((float(pt1.get_xdata()[0]), float(pt1.get_ydata()[0]))), + tuple((float(pt2.get_xdata()[0]), float(pt2.get_ydata()[0])))), + vol=(img, extent), + n_samples=n_samples, + width_px=width_px, + show=False) + prof_line.set_data(lc_local["distance"], lc_local["intensity"]) + ax_prof.relim(); ax_prof.autoscale_view() + fig.canvas.draw_idle() + + def on_press(event): + if event.inaxes != ax_img: + return + x, y = event.xdata, event.ydata + if x is None or y is None: + return + # pick nearest endpoint + d1 = float(np.hypot(x - pt1.get_xdata()[0], y - pt1.get_ydata()[0])) + d2 = float(np.hypot(x - pt2.get_xdata()[0], y - pt2.get_ydata()[0])) + state["drag"] = 0 if d1 <= d2 else 1 + + def on_motion(event): + if state["drag"] is None or event.inaxes != ax_img: + return + x, y = event.xdata, event.ydata + if x is None or y is None: + return + # constrain to extents + x = float(np.clip(x, U_min, U_max)) + y = float(np.clip(y, V_min, V_max)) + if state["drag"] == 0: + pt1.set_data([x], [y]) + else: + pt2.set_data([x], [y]) + if Line2D is not None: + line.set_data([pt1.get_xdata()[0], pt2.get_xdata()[0]], + [pt1.get_ydata()[0], pt2.get_ydata()[0]]) + else: + line_plot.set_data([pt1.get_xdata()[0], pt2.get_xdata()[0]], + [pt1.get_ydata()[0], pt2.get_ydata()[0]]) + update_profile() + + def on_release(event): + state["drag"] = None + + cid1 = fig.canvas.mpl_connect('button_press_event', on_press) + cid2 = fig.canvas.mpl_connect('motion_notify_event', on_motion) + cid3 = fig.canvas.mpl_connect('button_release_event', on_release) + + plt.tight_layout() + plt.show() + + return lc + + def show_slice(self, 
slice_mesh, shape=None, cmap='viridis', + clim=None, min_intensity=None, max_intensity=None, axes=None, return_image=False, + axis_display='hkl', show_grid=False, shape_data=True): + """ + Display a pre-computed slice mesh as a 2D raster with interactive features. + + This method visualizes slice data that has already been created by slice_data(), + providing rasterization, intensity range calculation, and interactive hover tooltips. + + Usage: + # First create the slice + slice_mesh = da.slice_data(data, hkl='HK', shape=(100, 100)) + + # Then display it + da.show_slice(slice_mesh) + + # Or display with custom settings + da.show_slice(slice_mesh, cmap='hot', clim=(0, 1000)) + + # Display with simple U/V labels + da.show_slice(slice_mesh, axis_display='uv') + + Parameters: + slice_mesh: pv.PolyData slice mesh from slice_data() with: + - 'intensity' point data array + - field_data containing slice metadata (normal, origin, axes, etc.) + shape (tuple): (H, W) raster resolution. If None, uses (512, 512) + cmap (str): Matplotlib colormap name for display + clim (tuple): (vmin, vmax) intensity display limits. Overridden by min/max_intensity + min_intensity (float): Minimum intensity threshold (filters and sets vmin) + max_intensity (float): Maximum intensity threshold (filters and sets vmax) + axes: Optional matplotlib Axes object for rendering. Creates new figure if None + return_image (bool): If True, returns (img, extent) tuple instead of displaying + axis_display (str): Axis label format. Options: + - 'hkl' (default): Shows formatted HKL expressions (e.g., "H + K", "L/2") + - 'uv': Shows simple "U" and "V" labels + + Returns: + tuple or None: If return_image=True, returns (img, extent) where: + - img: np.ndarray of rasterized intensity values + - extent: [U_min, U_max, V_min, V_max] physical coordinate bounds + Otherwise returns None and displays the slice + + Raises: + ImportError: If matplotlib is not available + TypeError: If slice_mesh is not a pv.PolyData + ValueError: If slice_mesh lacks required data + + Examples: + # Basic display + slice_mesh = da.slice_data(volume, hkl='HK') + da.show_slice(slice_mesh) + + # High-resolution display with filtering + da.show_slice(slice_mesh, shape=(1024, 1024), min_intensity=50) + + # Get image data for line cut analysis + img, extent = da.show_slice(slice_mesh, return_image=True) + line_data = da.line_cut('zero', param=(0.5, 'x'), vol=(img, extent)) + + # Display with simple U/V labels + da.show_slice(slice_mesh, axis_display='uv') + """ + if plt is None: + raise ImportError("matplotlib is required for show_slice()") + + if not isinstance(slice_mesh, pv.PolyData): + raise TypeError("slice_mesh must be a pv.PolyData object from slice_data()") + + import numpy as _np + + # Get metadata from slice mesh + normal_fd = getattr(slice_mesh, 'field_data', {}).get('slice_normal', None) + origin_fd = getattr(slice_mesh, 'field_data', {}).get('slice_origin', None) + u_axis_fd = getattr(slice_mesh, 'field_data', {}).get('slice_u_axis', None) + v_axis_fd = getattr(slice_mesh, 'field_data', {}).get('slice_v_axis', None) + u_label_fd = getattr(slice_mesh, 'field_data', {}).get('slice_u_label', None) + v_label_fd = getattr(slice_mesh, 'field_data', {}).get('slice_v_label', None) + + # Resolve shape: prefer stored slice_shape from slice_data, else given shape, else 512x512 + stored_shape = getattr(slice_mesh, 'field_data', {}).get('slice_shape', None) + if isinstance(shape, (tuple, list)) and len(shape) == 2: + H, W = int(shape[0]), int(shape[1]) + elif stored_shape 
is not None and len(stored_shape) >= 2: + H, W = int(stored_shape[0]), int(stored_shape[1]) + else: + H, W = 512, 512 + H = max(int(H), 1) + W = max(int(W), 1) + + # Rasterize the slice mesh + pts = _np.asarray(slice_mesh.points, dtype=float) + try: + vals = _np.asarray(slice_mesh['intensity'], dtype=float).reshape(-1) + except Exception: + raise ValueError("slice_mesh must have 'intensity' point data array") + + if pts.size == 0 or vals.size == 0: + raise ValueError("slice_mesh contains no valid points to rasterize") + + # Get normal and origin from metadata + normal = _np.asarray(normal_fd if normal_fd is not None else [0, 0, 1], dtype=float) + origin = _np.asarray(origin_fd if origin_fd is not None else slice_mesh.center, dtype=float) + + # Normalize normal + n_norm = float(_np.linalg.norm(normal)) + if n_norm > 0: + normal = normal / n_norm + else: + normal = _np.array([0.0, 0.0, 1.0], dtype=float) + + # --- START Infer orientation from normal START --- # + X = _np.array([1.0, 0.0, 0.0], dtype=float) # H + Y = _np.array([0.0, 1.0, 0.0], dtype=float) # K + Z = _np.array([0.0, 0.0, 1.0], dtype=float) # L + tolerance = 0.95 + dX = abs(float(_np.dot(normal, X))) + dY = abs(float(_np.dot(normal, Y))) + dZ = abs(float(_np.dot(normal, Z))) + + if dZ >= tolerance: + # HK plane + U = pts[:, 0] + V = pts[:, 1] + orientation = "HK" + orth_label = "L" + orth_value = float(origin[2]) + elif dX >= tolerance: + # KL plane + U = pts[:, 1] + V = pts[:, 2] + orientation = "KL" + orth_label = "H" + orth_value = float(origin[0]) + elif dY >= tolerance: + # HL plane + U = pts[:, 0] + V = pts[:, 2] + orientation = "HL" + orth_label = "K" + orth_value = float(origin[1]) + else: + # Custom orientation - use stored axes if available + if u_axis_fd is not None and v_axis_fd is not None: + u = _np.asarray(u_axis_fd, dtype=float) + v = _np.asarray(v_axis_fd, dtype=float) + else: + # Build orthonormal basis + world_axes = [X, Y, Z] + ref = world_axes[0] + for ax in world_axes: + if abs(float(_np.dot(ax, normal))) < 0.9: + ref = ax + break + u = _np.cross(normal, ref) + u_norm = float(_np.linalg.norm(u)) + if u_norm > 0: + u = u / u_norm + else: + u = _np.array([1.0, 0.0, 0.0], dtype=float) + v = _np.cross(normal, u) + v_norm = float(_np.linalg.norm(v)) + if v_norm > 0: + v = v / v_norm + else: + v = _np.array([0.0, 1.0, 0.0], dtype=float) + + # Project points + rel = pts - origin[None, :] + U = rel.dot(u) + V = rel.dot(v) + orientation = "Custom" + orth_label = None + orth_value = None + # --- END Infer orientation from normal END --- # + # Apply intensity filtering if requested + if (min_intensity is not None) or (max_intensity is not None): + mask = _np.ones(vals.shape, dtype=bool) + if min_intensity is not None: + mask &= (vals >= float(min_intensity)) + if max_intensity is not None: + mask &= (vals <= float(max_intensity)) + if _np.any(mask): + U = U[mask] + V = V[mask] + vals = vals[mask] + else: + # No points pass filter + U = _np.array([]) + V = _np.array([]) + vals = _np.array([]) + + # Calculate extents + if len(U) > 0: + U_min, U_max = float(_np.min(U)), float(_np.max(U)) + V_min, V_max = float(_np.min(V)), float(_np.max(V)) + else: + U_min, U_max = -0.5, 0.5 + V_min, V_max = -0.5, 0.5 + + if U_max == U_min: + U_min -= 0.5 + U_max += 0.5 + if V_max == V_min: + V_min -= 0.5 + V_max += 0.5 + + # If caller changed shape relative to stored slice_shape, expand/shrink HKL extents + # to keep per-pixel physical size consistent. This makes axis ranges change with shape. 
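+        # Worked example of the rescaling below (illustrative numbers only):
+        # if the stored slice_shape is (512, 512) spanning U in [0.0, 1.0],
+        # the per-pixel size is u_pp = 1/512. Requesting shape=(512, 1024)
+        # keeps u_pp fixed, so the new U range becomes 1024 * (1/512) = 2.0,
+        # re-centered on the original midpoint.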
+ try: + stored_shape_fd = getattr(slice_mesh, 'field_data', {}).get('slice_shape', None) + if shape_data and (stored_shape_fd is not None) and isinstance(shape, (tuple, list)) and (len(shape) == 2): + orig_H = int(stored_shape_fd[0]) + orig_W = int(stored_shape_fd[1]) + new_H = int(H) + new_W = int(W) + if (orig_H > 0) and (orig_W > 0) and ((new_H != orig_H) or (new_W != orig_W)): + u_center = 0.5 * (U_min + U_max) + v_center = 0.5 * (V_min + V_max) + # Compute original per-pixel sizes; fallback to current ranges if degenerate + u_pp = (U_max - U_min) / float(orig_W) if (U_max != U_min and orig_W > 0) else (U_max - U_min) + v_pp = (V_max - V_min) / float(orig_H) if (V_max != V_min and orig_H > 0) else (V_max - V_min) + new_u_range = float(u_pp) * float(new_W) + new_v_range = float(v_pp) * float(new_H) + U_min = float(u_center) - 0.5 * float(new_u_range) + U_max = float(u_center) + 0.5 * float(new_u_range) + V_min = float(v_center) - 0.5 * float(new_v_range) + V_max = float(v_center) + 0.5 * float(new_v_range) + except Exception: + # Be permissive; if anything fails here, continue with original extents + pass + + # Rasterize to image + if len(vals) > 0: + # Direct image placement for plane-generated slices (no histogram2d re-binning) + is_plane_grid = False + try: + stored_shape_fd = getattr(slice_mesh, 'field_data', {}).get('slice_shape', None) + if (stored_shape_fd is not None) and (int(stored_shape_fd[0]) * int(stored_shape_fd[1]) == int(pts.shape[0])): + is_plane_grid = True + except Exception: + is_plane_grid = False + + if is_plane_grid and (pts.shape[0] == (H * W)): + img = _np.asarray(vals, dtype=_np.float32).reshape(H, W) + else: + sum_img, _, _ = _np.histogram2d(V, U, bins=[H, W], + range=[[V_min, V_max], [U_min, U_max]], + weights=vals) + cnt_img, _, _ = _np.histogram2d(V, U, bins=[H, W], + range=[[V_min, V_max], [U_min, U_max]]) + with _np.errstate(invalid="ignore", divide="ignore"): + img = _np.zeros_like(sum_img, dtype=_np.float32) + nz = cnt_img > 0 + img[nz] = (sum_img[nz] / cnt_img[nz]).astype(_np.float32) + img[~nz] = 0.0 + + valid_pixels = img[_np.isfinite(img)] + if valid_pixels.size > 0: + actual_min = float(_np.nanmin(valid_pixels)) + actual_max = float(_np.nanmax(valid_pixels)) + else: + actual_min = 0.0 + actual_max = 0.0 + else: + img = _np.zeros((H, W), dtype=_np.float32) + actual_min = 0.0 + actual_max = 0.0 + try: + # cache for da.line_cut when called without vol + self._last_image = img + self._last_extent = [U_min, U_max, V_min, V_max] + self._last_orientation = orientation + except Exception: + pass + + extent = [U_min, U_max, V_min, V_max] + if axes is None: + fig, ax = plt.subplots(figsize=(6, 5)) + else: + ax = axes + fig = ax.figure + # Resolve display limits: use actual rasterized data range + vmin = float(min_intensity) if (min_intensity is not None) else actual_min + vmax = float(max_intensity) if (max_intensity is not None) else actual_max + + # Override with clim if provided + if clim: + if clim[0] is not None: + vmin = clim[0] + if clim[1] is not None: + vmax = clim[1] + + # Check for slice-stored clim as fallback + if (vmin is None or vmax is None): + try: + clim_fd = getattr(slice_mesh, 'field_data', {}).get('slice_intensity_clim', None) + if clim_fd is not None: + c = _np.asarray(clim_fd, dtype=float).reshape(-1) + if c.size >= 2 and _np.isfinite(c[0]) and _np.isfinite(c[1]): + if vmin is None: + vmin = float(c[0]) + if vmax is None: + vmax = float(c[1]) + except Exception: + pass + + + im = ax.imshow(img, origin='lower', extent=extent, 
cmap=cmap, + vmin=vmin, + vmax=vmax, + aspect='auto') + # Apply rectangular view when requested without reshaping data + try: + if (not shape_data) and isinstance(shape, (tuple, list)) and len(shape) == 2: + _H, _W = int(shape[0]), int(shape[1]) + _ratio = float(_H) / float(_W) if (_W != 0) else 1.0 + try: + ax.set_box_aspect(_ratio) + except Exception: + try: + # Fallback: adjust data aspect to approximate box aspect + ax.set_aspect(((extent[3] - extent[2]) / (extent[1] - extent[0])) * _ratio, adjustable='box') + except Exception: + pass + except Exception: + pass + # Origin overlay textbox + origin_text = f"Origin: H={origin[0]:.3f}, K={origin[1]:.3f}, L={origin[2]:.3f}" + origin_text_artist = None + origin_marker_artist = None + try: + origin_text_artist = ax.text( + 0.0, + -0.10, + origin_text, + transform=ax.transAxes, + ha='left', + va='top', + fontsize=9, + color='black', + clip_on=False + ) + except Exception: + origin_text_artist = None + + # ----- Interactive hover label using annotate + mouse events + fig = ax.figure + # Interactive checkbox to toggle origin visibility + ann = ax.annotate( + "", + xy=(0, 0), + xytext=(12, 12), + textcoords="offset points", + fontsize=9, + color="white", + bbox=dict(boxstyle="round", fc="black", ec="white", alpha=0.85), + arrowprops=dict(arrowstyle="->", color="white", alpha=0.85) + ) + ann.set_visible(False) + + def _label_text(x_coord, y_coord, intensity_val): + # Hover label: show UV only when axis_display == 'uv' + # Otherwise show H,K,L (all three), with orth axis from origin for canonical planes + try: + if axis_display == 'uv': + return f"U={x_coord:.3f}, V={y_coord:.3f}\nIntensity={float(intensity_val):.1f}" + if orientation == "HK": + return f"H={y_coord:.3f}, K={x_coord:.3f}, L={float(origin[2]):.3f}\nIntensity={float(intensity_val):.1f}" + if orientation == "KL": + return f"H={float(origin[0]):.3f}, K={x_coord:.3f}, L={y_coord:.3f}\nIntensity={float(intensity_val):.1f}" + if orientation == "HL": + return f"H={x_coord:.3f}, K={float(origin[1]):.3f}, L={y_coord:.3f}\nIntensity={float(intensity_val):.1f}" + # Custom orientation: project back to HKL; fix orth axis (closest to normal) to origin + if 'u' in locals() and 'v' in locals() and isinstance(u, _np.ndarray) and isinstance(v, _np.ndarray): + hkl = _np.asarray(origin, dtype=float) + x_coord * _np.asarray(u, dtype=float) + y_coord * _np.asarray(v, dtype=float) + X = _np.array([1.0, 0.0, 0.0], dtype=float) + Y = _np.array([0.0, 1.0, 0.0], dtype=float) + Z = _np.array([0.0, 0.0, 1.0], dtype=float) + d = _np.asarray([abs(float(_np.dot(normal, X))), + abs(float(_np.dot(normal, Y))), + abs(float(_np.dot(normal, Z)))], dtype=float) + idx = int(_np.argmax(d)) + hkl[idx] = float(origin[idx]) + return f"H={float(hkl[0]):.3f}, K={float(hkl[1]):.3f}, L={float(hkl[2]):.3f}\nIntensity={float(intensity_val):.1f}" + except Exception: + pass + # Fallback + return f"U={x_coord:.3f}, V={y_coord:.3f}\nIntensity={float(intensity_val):.1f}" + + def _on_move(event): + if event.inaxes is not ax: + return + x = event.xdata + y = event.ydata + if x is None or y is None: + return + try: + col = int((x - extent[0]) / (extent[1] - extent[0]) * img.shape[1]) + row = int((y - extent[2]) / (extent[3] - extent[2]) * img.shape[0]) + col = max(0, min(img.shape[1] - 1, col)) + row = max(0, min(img.shape[0] - 1, row)) + intensity = img[row, col] + except Exception: + return + ann.xy = (x, y) + ann.set_text(_label_text(x, y, intensity)) + if not ann.get_visible(): + ann.set_visible(True) + try: + fig.canvas.draw_idle() + 
except Exception: + pass + + def _on_leave(event): + if ann.get_visible(): + ann.set_visible(False) + try: + fig.canvas.draw_idle() + except Exception: + pass + + try: + fig.canvas.mpl_connect("motion_notify_event", _on_move) + fig.canvas.mpl_connect("axes_leave_event", _on_leave) + except Exception: + pass + + # Set axis labels based on axis_display parameter + if axis_display == 'uv': + # Simple U/V labels + ax.set_xlabel('U') + ax.set_ylabel('V') + else: + # HKL formatting (default) + if u_label_fd is not None and v_label_fd is not None: + ax.set_xlabel(str(u_label_fd)) + ax.set_ylabel(str(v_label_fd)) + elif u_axis_fd is not None and v_axis_fd is not None: + ax.set_xlabel(format_hkl_axis(u_axis_fd)) + ax.set_ylabel(format_hkl_axis(v_axis_fd)) + elif orientation == "HK": + ax.set_xlabel('H') + ax.set_ylabel('K') + elif orientation == "KL": + ax.set_xlabel('K') + ax.set_ylabel('L') + elif orientation == "HL": + ax.set_xlabel('H') + ax.set_ylabel('L') + else: + ax.set_xlabel('U') + ax.set_ylabel('V') + + # Title: include orth axis for canonical planes; for Custom, use nearest axis to normal + title = None + fallback_label = None + fallback_value = None + try: + X = _np.array([1.0, 0.0, 0.0], dtype=float) + Y = _np.array([0.0, 1.0, 0.0], dtype=float) + Z = _np.array([0.0, 0.0, 1.0], dtype=float) + dX = abs(float(_np.dot(normal, X))) + dY = abs(float(_np.dot(normal, Y))) + dZ = abs(float(_np.dot(normal, Z))) + idx = int(_np.argmax(_np.asarray([dX, dY, dZ], dtype=float))) + labels = ['H', 'K', 'L'] + fallback_label = labels[idx] + fallback_value = float(origin[idx]) + except Exception: + pass + if orientation in ("HK", "KL", "HL") and (orth_label is not None) and (orth_value is not None) and _np.isfinite(orth_value): + title = f'{orientation} plane ({orth_label} = {orth_value:.3f})' + elif (fallback_label is not None) and (fallback_value is not None) and _np.isfinite(fallback_value): + title = f'{orientation} slice ({fallback_label} = {fallback_value:.3f})' + else: + title = f'{orientation} slice' + ax.set_title(title) + + ax.figure.colorbar(im, ax=ax, label='Intensity') + + # Add grid if requested + if show_grid: + ax.grid(True, alpha=0.3, linestyle='--', linewidth=0.5) + + # Use Matplotlib status bar readout via format_coord (built-in hover) + def _format_coord(x_coord, y_coord): + try: + col = int((x_coord - extent[0]) / (extent[1] - extent[0]) * img.shape[1]) + row = int((y_coord - extent[2]) / (extent[3] - extent[2]) * img.shape[0]) + col = max(0, min(img.shape[1] - 1, col)) + row = max(0, min(img.shape[0] - 1, row)) + intensity = img[row, col] + except Exception: + return "" + # Status bar readout mirrors hover: UV for axis_display == 'uv'; else show H,K,L + try: + if axis_display == 'uv': + return f"U: {x_coord:.3f}, V: {y_coord:.3f} Intensity: {float(intensity):.1f}" + if orientation == "HK": + return f"H: {y_coord:.3f}, K: {x_coord:.3f}, L: {float(origin[2]):.3f} Intensity: {float(intensity):.1f}" + if orientation == "KL": + return f"H: {float(origin[0]):.3f}, K: {x_coord:.3f}, L: {y_coord:.3f} Intensity: {float(intensity):.1f}" + if orientation == "HL": + return f"H: {x_coord:.3f}, K: {float(origin[1]):.3f}, L: {y_coord:.3f} Intensity: {float(intensity):.1f}" + if 'u' in locals() and 'v' in locals() and isinstance(u, _np.ndarray) and isinstance(v, _np.ndarray): + hkl = _np.asarray(origin, dtype=float) + x_coord * _np.asarray(u, dtype=float) + y_coord * _np.asarray(v, dtype=float) + return f"H: {float(hkl[0]):.3f}, K: {float(hkl[1]):.3f}, L: {float(hkl[2]):.3f} Intensity: 
{float(intensity):.1f}"
+            except Exception:
+                pass
+            return f"U: {x_coord:.3f}, V: {y_coord:.3f}    Intensity: {float(intensity):.1f}"
+        ax.format_coord = _format_coord
+
+        if axes is None:  # Only show if standalone
+            plt.show()
+
+        if return_image:
+            return img, extent
+        return None
+
+    def show_point_cloud(self, data, intensities=None, *, notebook=True,
+                         point_size=1.0, cmap='viridis', opacity=1.0,
+                         render_points_as_spheres=False, axes_labels=('H','K','L'),
+                         clim=None, show_bounds=True, opacity_range=None):
+        """
+        Render a point cloud in HKL space with advanced visualization options.
+
+        This method provides comprehensive 3D point data visualization with support for
+        multiple data formats, opacity control, intensity filtering, and interactive features.
+        Supports both notebook and standalone rendering with customizable appearance.
+
+        Usage:
+            # Basic point cloud rendering
+            da.show_point_cloud(data.points, data.intensities)
+
+            # Hide points outside an intensity window
+            da.show_point_cloud(data, clim=(100, 1000), opacity_range=(100, 1000))
+
+            # High-quality spherical rendering
+            da.show_point_cloud(data, render_points_as_spheres=True, point_size=5.0)
+
+        Parameters:
+            data: Point cloud data.
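+                A minimal sketch of equivalent calls (illustrative; assumes
+                `pts` is an (N, 3) array and `vals` a length-N array):
+                    da.show_point_cloud((pts, vals))
+                    da.show_point_cloud({'points': pts, 'intensities': vals})
+                    da.show_point_cloud(pts, intensities=vals)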
Supported formats:
+                - (points, intensities) tuple/list
+                - Data object with .points and .intensities attributes
+                - Dict with 'points' and 'intensities' keys
+                - pv.PolyData with optional 'intensity' array
+                - np.ndarray of shape (N,3) for points (provide intensities separately)
+            intensities (array-like): Optional 1D array of length N for intensity values
+            notebook (bool): Use notebook plotter (True) or regular plotter (False)
+            point_size (float): Point size for rendering
+            cmap (str): Colormap name for intensity visualization
+            opacity (float): Point opacity [0..1]
+            render_points_as_spheres (bool): True for spherical glyphs, False for points
+            axes_labels (tuple): Axis labels, default ('H','K','L')
+            clim (tuple): Optional (vmin, vmax) intensity display limits
+            show_bounds (bool): Whether to show coordinate bounds with labels
+            opacity_range (tuple): Optional (vmin, vmax) intensity window; points whose
+                intensity falls outside the window are rendered fully transparent
+            Note: In notebook mode, rendering caps at 5,000,000 points; if more are provided,
+                the top 5,000,000 intensities are used to maintain interactive performance.
+                The optional clim=(vmin, vmax) controls color scaling only and does not affect
+                which points are selected or rendered.
+
+        Returns:
+            PyVista rendering result (displays inline in notebooks)
+
+        Raises:
+            ImportError: If PyVista is not available
+            TypeError: If the data format is not supported
+        Examples:
+            # Basic visualization
+            da.show_point_cloud(point_data, intensity_data)
+
+            # High-contrast visualization that hides out-of-range points
+            da.show_point_cloud(data, clim=(50, 500), opacity_range=(50, 500))
+
+            # Transparent outside a 200-800 intensity window
+            da.show_point_cloud(data, opacity_range=(200, 800), cmap='plasma')
+        """
+        # Normalize inputs to pv.PolyData + 'intensity' if available
+        pts = None
+        ints = None
+        poly = None
+        # Convert the data to PolyData
+        if isinstance(data, pv.PolyData):
+            poly = data
+            if ('intensity' not in poly.array_names) and (intensities is not None):
+                poly['intensity'] = np.asarray(intensities, dtype=np.float32)
+        elif hasattr(data, 'points') and hasattr(data, 'intensities'):
+            pts = np.asarray(data.points, dtype=float)
+            ints = np.asarray(data.intensities, dtype=float)
+            poly = pv.PolyData(pts)
+            poly['intensity'] = ints.astype(np.float32)
+        elif isinstance(data, (tuple, list)) and len(data) >= 2:
+            pts = np.asarray(data[0], dtype=float)
+            ints = np.asarray(data[1], dtype=float)
+            poly = pv.PolyData(pts)
+            poly['intensity'] = ints.astype(np.float32)
+        elif isinstance(data, dict) and ('points' in data):
+            pts = np.asarray(data['points'], dtype=float)
+            ints = np.asarray(data.get('intensities', intensities), dtype=float) if ('intensities' in data or intensities is not None) else None
+            poly = pv.PolyData(pts)
+            if ints is not None and ints.shape[0] == pts.shape[0]:
+                poly['intensity'] = ints.astype(np.float32)
+        elif isinstance(data, np.ndarray) and data.ndim == 2 and data.shape[1] == 3:
+            pts = np.asarray(data, dtype=float)
+            poly = pv.PolyData(pts)
+            if intensities is not None:
+                poly['intensity'] = np.asarray(intensities, dtype=np.float32)
+        else:
+            raise TypeError("Unsupported data format. Provide (points, intensities), Data, dict, pv.PolyData, or Nx3 ndarray.")
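+        # Note on the notebook cap below (explanatory sketch, not executed logic):
+        # np.argpartition(ints, N - k)[N - k:] returns the indices of the k
+        # largest intensities in O(N) without a full sort; the follow-up argsort
+        # then orders just those k survivors by descending intensity. For example,
+        # with ints = [5, 1, 9, 3] and k = 2, the selected indices are {0, 2}
+        # (values 5 and 9), ordered to [2, 0].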
+        print("Converted to poly data")
+
+        # Cap points to top 5,000,000 intensities for performance in notebook mode
+        MAX_POINTS = 5_000_000
+        try:
+            if bool(notebook) and poly is not None:
+                N = int(poly.n_points)
+                if N > MAX_POINTS:
+                    if 'intensity' in poly.array_names:
+                        ints_arr = np.asarray(poly['intensity'], dtype=np.float32).reshape(-1)
+                        if ints_arr.shape[0] == N:
+                            k = MAX_POINTS
+                            idx = np.argpartition(ints_arr, N - k)[N - k:]
+                            # Deterministic ordering by descending intensity
+                            idx = idx[np.argsort(ints_arr[idx])[::-1]]
+                            pts_arr = np.asarray(poly.points, dtype=float)
+                            poly = pv.PolyData(pts_arr[idx])
+                            poly['intensity'] = ints_arr[idx].astype(np.float32)
+                            try:
+                                print(f"Notebook mode: capped point cloud from {N} to {int(poly.n_points)} by top intensities.")
+                            except Exception:
+                                pass
+                        else:
+                            # Fallback: intensity length mismatch; random subsample
+                            rng = np.random.default_rng()
+                            idx = rng.choice(N, size=MAX_POINTS, replace=False)
+                            poly = pv.PolyData(np.asarray(poly.points, dtype=float)[idx])
+                            try:
+                                print(f"Notebook mode: capped point cloud from {N} to {int(poly.n_points)} (random fallback due to intensity mismatch).")
+                            except Exception:
+                                pass
+                    else:
+                        # No intensities; random subsample to respect cap
+                        rng = np.random.default_rng()
+                        idx = rng.choice(N, size=MAX_POINTS, replace=False)
+                        poly = pv.PolyData(np.asarray(poly.points, dtype=float)[idx])
+                        try:
+                            print(f"Notebook mode: capped point cloud from {N} to {int(poly.n_points)} (random fallback, no intensities).")
+                        except Exception:
+                            pass
+        except Exception as _cap_err:
+            # Be permissive; if capping fails, continue with original data
+            try:
+                print(f"Notebook mode capping failed: {_cap_err}")
+            except Exception:
+                pass
+
+        # Create plotter
+        p = pv.Plotter(notebook=bool(notebook))
+        p.add_axes(xlabel=str(axes_labels[0]), ylabel=str(axes_labels[1]), zlabel=str(axes_labels[2]))
+        # Simple LUT configuration; use the given clim for color scaling (if provided).
+        # Only configure the out-of-range window when opacity_range is supplied,
+        # otherwise the default (None) would crash on indexing.
+        lut = pv.LookupTable(cmap=cmap)
+        if opacity_range is not None:
+            lut.above_range_color = 'white'
+            lut.below_range_color = 'white'
+            lut.above_range_opacity = 0
+            lut.below_range_opacity = 0
+            lut.scalar_range = (float(opacity_range[0]), float(opacity_range[1]))
+
+        actor = p.add_mesh(poly,
+                           scalars='intensity' if 'intensity' in poly.array_names else None,
+                           cmap=lut,
+                           render_points_as_spheres=bool(render_points_as_spheres),
+                           point_size=float(point_size),
+                           clim=clim,
+                           opacity=float(opacity) if opacity_range is None else 1.0,
+                           name='points')
+
+        # Optional bounds
+        if bool(show_bounds):
+            try:
+                p.show_bounds(mesh=poly,
+                              xtitle=str(axes_labels[0]),
+                              ytitle=str(axes_labels[1]),
+                              ztitle=str(axes_labels[2]),
+                              bounds=poly.bounds)
+            except Exception:
+                pass
+
+        return p.show()
+
+    def show_vol(self, vol, spacing=(1.0, 1.0, 1.0), origin=(0.0, 0.0, 0.0), cmap='jet'):
+        """
+        Display a 3D HKL volume with comprehensive rendering options.
+
+        This method provides volume rendering for 3D HKL data with automatic grid construction,
+        proper coordinate handling, and interactive visualization features.
+
+        Usage:
+            # Display volume from ImageData
+            da.show_vol(pyvista_volume)
+
+            # Display volume from NumPy array
+            da.show_vol(numpy_volume, spacing=(0.1, 0.1, 0.1))
+
+        Parameters:
+            vol: Volume data.
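+                A minimal sketch (illustrative; assumes `vol3d` is a float
+                (D, H, W) ndarray of intensities):
+                    da.show_vol(vol3d, spacing=(0.01, 0.01, 0.01),
+                                origin=(-1.0, -1.0, -1.0), cmap='viridis')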
Supported formats: + - pv.ImageData with cell_data['intensity'] + - np.ndarray (D,H,W) of intensity values (cell-centered) + spacing (tuple): Voxel spacing (ΔH, ΔK, ΔL) for NumPy arrays + origin (tuple): Grid origin (H0, K0, L0) for NumPy arrays + cmap (str): Colormap name for volume rendering + + Returns: + PyVista rendering result (displays inline in notebooks) + + Raises: + ValueError: If volume format is invalid or missing intensity data + TypeError: If vol is not a supported format + + Examples: + # Basic volume rendering + da.show_vol(volume_data) + + # High-resolution volume with custom spacing + da.show_vol(numpy_vol, spacing=(0.05, 0.05, 0.05), cmap='plasma') + """ + # Normalize input to a PyVista ImageData with cell_data['intensity'] + if isinstance(vol, pv.ImageData): + grid = vol + # Ensure 'intensity' exists + if 'intensity' not in grid.cell_data and 'intensity' in grid.point_data: + # Convert to cell_data for consistent D×H×W handling + grid = grid.point_data_to_cell_data(pass_point_data=False) + if 'intensity' not in grid.cell_data: + raise ValueError("ImageData must have cell_data['intensity'] for volume rendering.") + elif isinstance(vol, np.ndarray): + if vol.ndim != 3: + raise ValueError("NumPy volume must be 3D shaped (D, H, W).") + # Build grid: dimensions = cells + 1 (VTK requirement) + dims_cells = np.array(vol.shape, dtype=int) + grid = pv.ImageData() + grid.dimensions = (dims_cells + 1).tolist() + grid.spacing = tuple(float(x) for x in spacing) + grid.origin = tuple(float(x) for x in origin) + # For VTK/PyVista, flatten with Fortran order to match D×H×W cell-layout + grid.cell_data['intensity'] = np.asarray(vol, dtype=np.float32).flatten(order='F') + else: + raise TypeError("vol must be a pyvista.ImageData or a NumPy ndarray (D,H,W).") + + # Compute display clim from data range if available + try: + data = np.asarray(grid.cell_data['intensity']) + clim = (float(np.min(data)), float(np.max(data))) + except Exception: + clim = None + + # Render inline + plotter = pv.Plotter(notebook=True) + plotter.add_axes(xlabel='H', ylabel='K', zlabel='L') + plotter.add_volume(grid, scalars='intensity', cmap=cmap, clim=clim, name='cloud_volume', show_scalar_bar=True) + try: + plotter.show_bounds(mesh=grid, xtitle='H Axis', ytitle='K Axis', ztitle='L Axis', bounds=grid.bounds) + except Exception: + pass + return plotter.show() + + def create_vol(self, points, intensities): + """ + Create a 3D volume from point cloud data using adaptive interpolation. + + This method converts point cloud data into a structured 3D volume suitable for + visualization and analysis, with automatic resolution selection based on data density. 
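+
+        Resolution note (mirroring the thresholds in the code below): the grid uses
+        300 cells per axis below 2,000,000 points, 275 cells for 2-5 million, and
+        250 cells at 5 million or more. A rough worked sketch, assuming 3,000,000
+        input points and a padded extent of 2.2 along one axis:
+
+            refine_cells = 275          # 2M <= N < 5M band
+            spacing = 2.2 / 275         # ~0.008 per voxel along that axis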
+ + Usage: + # Create volume from point cloud + volume = da.create_vol(data.points, data.intensities) + + # Display the created volume + da.show_vol(volume) + + Parameters: + points (array-like): 3D point coordinates with shape (N, 3) + intensities (array-like): Intensity values with shape (N,) + + Returns: + pv.ImageData: Interpolated volume with cell_data['intensity'] + + Examples: + # Create and display volume + vol = da.create_vol(point_data, intensity_data) + da.show_vol(vol, cmap='viridis') + """ + cloud = pv.PolyData(points) + cloud['intensity'] = intensities.astype('float32') + minb = cloud.points.min(axis=0) + maxb = cloud.points.max(axis=0) + data_range = maxb - minb + padding = data_range * 0.10 + grid_min = minb - padding + grid_max = maxb + padding + grid_range = grid_max - grid_min + + # Resolution: adaptive cells per axis (mirror slicer thresholds) + total_points = int(points.shape[0]) if hasattr(points, "shape") else len(points) + if total_points >= 5_000_000: + refine_cells = 250 + elif total_points >= 2_000_000: + refine_cells = 275 + elif total_points >= 500_000: + refine_cells = 300 + else: + refine_cells = 300 + spacing = grid_range / refine_cells + dimensions = np.ceil(grid_range / spacing).astype(int) + 1 + + # Create grid + grid = pv.ImageData() + grid.origin = grid_min + grid.spacing = spacing + grid.dimensions = dimensions + + # Interpolate cloud into volume + optimal_radius = float(np.mean(spacing) * 2.5) + vol = grid.interpolate(cloud, radius=optimal_radius, sharpness=1.5, null_value=0.0) + + return vol + + def load_data(self, file_path: Optional[str] = None): + """ + Load 3D point data and intensities from HDF5 file. + + This method uses the project's HDF5Loader for consistent data loading + and returns a Data object containing points and intensities. + + Usage: + # Load data from file + data = da.load_data('/path/to/file.h5') + print(f"Loaded {len(data.points)} points") + + Parameters: + file_path (str, optional): Path to HDF5 file. If None, uses cached path + + Returns: + Data: Object containing points (N,3) and intensities (N,) arrays + + Raises: + FileNotFoundError: If no file path provided and none cached + Exception: If file loading fails + + Examples: + # Load and visualize data + data = da.load_data('experiment_data.h5') + da.show_point_cloud(data) + """ + path = file_path or getattr(self, 'file_path', None) + if not path: + raise FileNotFoundError("No file path provided to load_data and none set in DashAnalysis.") + + try: + loader = HDF5Loader() + points_3d, intensities, num_images, shape = loader.load_h5_to_3d(path) + + # Load metadata using load_h5_with_coordinates which returns metadata + metadata_dict = loader.get_file_info(path, style='dict') + + # Create Data object with metadata + return Data(points_3d, intensities, metadata=metadata_dict) + except Exception as e: + raise Exception(f"Failed to load data using HDF5Loader: {e}") + + def show_meta(self, file_path, *, style="text", raw=False, include_unknown=True, + float_precision=6, summarize_datasets=True): + """ + Display metadata information from HDF5 file. + + This method provides comprehensive metadata inspection for HDF5 files + using the project's HDF5Loader with customizable output formatting. + + Usage: + # Display file metadata + da.show_meta('/path/to/file.h5') + + # Raw metadata with high precision + da.show_meta('data.h5', raw=True, float_precision=10) + + Parameters: + file_path (str): Path to HDF5 file + style (str): Output style ('text', 'json', etc.) 
+ raw (bool): Include raw metadata + include_unknown (bool): Include unknown/unrecognized fields + float_precision (int): Decimal precision for floating point values + summarize_datasets (bool): Include dataset summaries + + Returns: + Metadata information (format depends on style parameter) + + Raises: + FileNotFoundError: If file path is invalid + + Examples: + # Basic metadata display + da.show_meta('data.h5') + + # Detailed JSON output + da.show_meta('data.h5', style='json', raw=True) + """ + if not file_path: + raise FileNotFoundError("No file path provided to load_data and none set in DashAnalysis.") + + loader = HDF5Loader() + return loader.get_file_info(file_path, style=style, raw=raw, include_unknown=include_unknown, + float_precision=float_precision, summarize_datasets=summarize_datasets) + + def _build_vol(self, data): + """ + Build a PyVista ImageData grid from loaded volume data. + + This internal method constructs PyVista grids from various data formats, + mirroring the viewer's approach for consistent handling. + + Parameters: + data: Volume data in supported formats: + - (volume_np, shape_tuple) tuple from HDF5Loader + - {'volume': np.ndarray, 'metadata': {...}} dict with metadata + + Returns: + pv.ImageData: Grid with cell_data['intensity'] populated + + Raises: + ImportError: If PyVista is not available + ValueError: If data format is unsupported or invalid + """ + import numpy as np + if pv is None: + raise ImportError("PyVista is required to build the volume. Install pyvista and retry.") + + # Extract volume and optional metadata + meta = {} + if isinstance(data, tuple) and len(data) >= 1: + volume = data[0] + elif isinstance(data, dict): + volume = data.get('volume') + meta = data.get('metadata') or {} + else: + raise ValueError("Unsupported data format for _build_vol; pass (volume, shape) tuple or {'volume': ..., 'metadata': ...} dict") + + if volume is None or not hasattr(volume, 'shape'): + raise ValueError("Invalid volume provided to _build_vol") + + # Determine cell-centered dimensions + try: + dims_cells_meta = meta.get('grid_dimensions_cells', None) + if dims_cells_meta is not None: + dims_cells = np.array(dims_cells_meta, dtype=int) + else: + dims_cells = np.array(volume.shape, dtype=int) + except Exception: + dims_cells = np.array(volume.shape, dtype=int) + + # Create grid with points-based dimensions (= cells + 1) + grid = pv.ImageData() + grid.dimensions = (dims_cells + 1).tolist() + + # Spacing and origin from metadata or defaults + spacing = meta.get('voxel_spacing') or (1.0, 1.0, 1.0) + origin = meta.get('grid_origin') or (0.0, 0.0, 0.0) + try: + grid.spacing = tuple(float(x) for x in spacing) + except Exception: + grid.spacing = (1.0, 1.0, 1.0) + try: + grid.origin = tuple(float(x) for x in origin) + except Exception: + grid.origin = (0.0, 0.0, 0.0) + + # Assign intensity scalars to cell_data using recorded array order + arr_order = (meta.get('array_order') or 'F') if isinstance(meta, dict) else 'F' + try: + grid.cell_data["intensity"] = volume.flatten(order=arr_order) + except Exception: + grid.cell_data["intensity"] = volume.flatten(order="F") + + return grid + + def _calculate_smart_radius(self, points, u_extent, v_extent, grid_shape): + """ + Calculate adaptive interpolation radius based on point density and grid resolution. + + This method computes an optimal interpolation radius to minimize gaps in slice + interpolation while maintaining appropriate resolution for the given data density. 
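+
+        Worked example (a sketch of the arithmetic implemented below, assuming
+        10,000 points over a 2 x 2 extent and a 200 x 200 grid):
+
+            point density  = 10000 / 4 = 2500 points per unit area
+                             (dense band, so the radius multiplier is 1.6)
+            point spacing  = 1 / sqrt(2500) = 0.02
+            avg cell size  = (2/200 + 2/200) / 2 = 0.01
+            optimal radius = max(1.5 * 0.02, 1.6 * 0.01, 1e-6) = 0.03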
+ + Parameters: + points (array-like): Points used for interpolation + u_extent (tuple): (u_min, u_max) extent in U direction + v_extent (tuple): (v_min, v_max) extent in V direction + grid_shape (tuple): (height, width) of target grid + + Returns: + float: Optimal interpolation radius + """ + import numpy as _np + + if len(points) == 0: + return 1e-6 + + # Calculate area and point density + u_range = max(u_extent[1] - u_extent[0], 1e-6) + v_range = max(v_extent[1] - v_extent[0], 1e-6) + area = u_range * v_range + point_density = len(points) / area + + # Calculate average point spacing (approximate) + avg_point_spacing = 1.0 / _np.sqrt(max(point_density, 1e-12)) + + # Calculate average grid cell size + avg_cell_u = u_range / max(grid_shape[1], 1) + avg_cell_v = v_range / max(grid_shape[0], 1) + avg_cell_size = (avg_cell_u + avg_cell_v) * 0.5 + + # Define density thresholds (points per unit area) + threshold_sparse = 0.5 + threshold_medium = 2.0 + + # Adaptive radius multiplier based on point density + if point_density < threshold_sparse: + # radius_multiplier = 4.5 # Very aggressive for very sparse data (legacy) + radius_multiplier = 2.0 # Tuned down for performance with large shapes + elif point_density < threshold_medium: + # radius_multiplier = 3.8 # Moderate for medium density (legacy) + radius_multiplier = 1.8 # Tuned down to limit neighbors per sample + else: + # radius_multiplier = 3.2 # Conservative for dense data (legacy) + radius_multiplier = 1.6 # Tuned down; keeps visuals similar while cutting work + + # Calculate radius ensuring it's at least as large as average point spacing + # and scales appropriately with grid resolution + radius_from_density = avg_point_spacing * 1.5 + radius_from_grid = radius_multiplier * avg_cell_size + + optimal_radius = max(radius_from_density, radius_from_grid, 1e-6) + + return float(optimal_radius) + + +# ============================================================================ +# HELPER FUNCTIONS +# ============================================================================ + +def format_hkl_axis(hkl_vector, tolerance=1e-6, max_denominator=12): + """ + Format a 3-vector in HKL coordinates as a readable string. + + This function converts HKL coordinate vectors into human-readable expressions + with rational number approximation and proper mathematical formatting. + + Parameters: + hkl_vector (array-like): 3-element vector with coefficients for [H, K, L] + tolerance (float): Threshold for considering a coefficient as zero + max_denominator (int): Maximum denominator for rational approximation + + Returns: + str: Formatted expression like "H", "H/2", "H + K", "0.866H + 0.5K", etc. 
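+
+        Rationalization note: coefficients are snapped to simple fractions with
+        fractions.Fraction(x).limit_denominator(max_denominator) when the match is
+        within tolerance, so an exact 0.5 renders as "K/2" rather than "0.5K",
+        while a value like 0.866 has no close fraction with denominator <= 12 and
+        falls back to the decimal form "0.866H".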
+ + Examples: + format_hkl_axis([1, 0, 0]) # Returns "H" + format_hkl_axis([0.5, 1, 0]) # Returns "H/2 + K" + format_hkl_axis([1, 1, 0]) # Returns "H + K" + format_hkl_axis([0, 0, 0.5]) # Returns "L/2" + """ + import numpy as np + from fractions import Fraction + + h, k, l = np.asarray(hkl_vector, dtype=float)[:3] + + def rationalize(x): + """Convert float to rational if close to a simple fraction.""" + if abs(x) < tolerance: + return 0, 1 + try: + frac = Fraction(x).limit_denominator(max_denominator) + if abs(float(frac) - x) < tolerance: + return frac.numerator, frac.denominator + except: + pass + return x, 1 + + def format_term(coeff, label): + """Format a single term like '2H', 'H/3', '-K', etc.""" + if abs(coeff) < tolerance: + return "" + + num, den = rationalize(coeff) + if isinstance(num, (int, np.integer)) and isinstance(den, (int, np.integer)): + if den == 1: + if num == 1: + return label + elif num == -1: + return f"-{label}" + else: + return f"{num}{label}" + else: + if num == 1: + return f"{label}/{den}" + elif num == -1: + return f"-{label}/{den}" + else: + return f"{num}{label}/{den}" + else: + # Fallback to decimal + if abs(coeff - 1.0) < tolerance: + return label + elif abs(coeff + 1.0) < tolerance: + return f"-{label}" + else: + return f"{coeff:.3g}{label}" + + terms = [] + for coeff, label in [(h, 'H'), (k, 'K'), (l, 'L')]: + term = format_term(coeff, label) + if term: + terms.append(term) + + if not terms: + return "0" + + # Join terms with proper signs + result = terms[0] + for term in terms[1:]: + if term.startswith('-'): + result += f" - {term[1:]}" + else: + result += f" + {term}" + + return result + + +def show_slice(mesh, cmap='viridis'): + """ + Convenience function for displaying slices. + + This function provides a simple interface for slice visualization + by delegating to the DashAnalysis.show_slice method. 
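+
+    Example (a minimal sketch, assuming `mesh` is a slice mesh produced by a
+    DashAnalysis slicing call):
+
+        show_slice(mesh, cmap='plasma')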
+ + Parameters: + mesh: Slice mesh or data to display + cmap (str): Colormap for visualization + + Returns: + Result from DashAnalysis.show_slice + """ + return DashAnalysis().show_slice(mesh, cmap=cmap) diff --git a/viewer/generators.py b/utils/generators.py similarity index 100% rename from viewer/generators.py rename to utils/generators.py diff --git a/utils/hdf5_handler.py b/utils/hdf5_handler.py new file mode 100644 index 0000000..5a49f31 --- /dev/null +++ b/utils/hdf5_handler.py @@ -0,0 +1,527 @@ +""" +HDF5 Handler that writes, reads in nexus standard file format +""" +import h5py +import numpy as np +from pathlib import Path +from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot +import hdf5plugin +from utils.pva_reader import PVAReader +from utils.metadata_converter import convert_files_or_dir +import settings +from pathlib import Path +import toml + + +HDF5_STRUCTURE = { + "nexus": { + "default": { + "NX_class": "NXroot", + "default": "entry", + "entry": { + "NX_class": "NXentry", + "default": "data", + + # --- INSTRUMENT: The 'How' (Source + Detector) --- + "instrument": { + "NX_class": "NXinstrument", + "source": { + "NX_class": "NXsource", + "target": "HKL/SPEC/ENERGY_VALUE", + "units": "keV" + }, + "detector": { + "NX_class": "NXdetector", + "target": "HKL/DETECTOR_SETUP", + "data_link": "/entry/data/data" # Link to raw image stack + } + }, + + # --- SAMPLE: The 'What' (Motor Stacks + Environment) --- + "sample": { + "NX_class": "NXsample", + "ub_matrix": { + "NX_class": "NXcollection", + "target": "HKL/SPEC/UB_MATRIX_VALUE" + }, + # Map your 4/6 circles here + "geometry": { + "NX_class": "NXtransformations", + "sample_phi": {"target": "HKL/SAMPLE_CIRCLE_AXIS_4", "type": "rotation"}, + "sample_chi": {"target": "HKL/SAMPLE_CIRCLE_AXIS_3", "type": "rotation"}, + "sample_eta": {"target": "HKL/SAMPLE_CIRCLE_AXIS_2", "type": "rotation"}, + "sample_mu": {"target": "HKL/SAMPLE_CIRCLE_AXIS_1", "type": "rotation"} + } + }, + + # --- DATA: The 'View' (Plotting Entry Point) --- + "data": { + "NX_class": "NXdata", + "signal": "data", + "data": {"link": "/entry/data/data"} + } + } + }, + "scans": { + "NX_class": "NXroot", + "default": "entry", + "entry": { + "name": "entry", + "NX_class": "NXentry", + "default": "data", + # Nested Groups inside Entry + "instrument": { + "name": "instrument", + "NX_class": "NXinstrument", + "detector": { + "name": "detector", + "NX_class": "NXdetector", + "field": "data", + # HKL.DETECTOR_SETUP + "distance": {"value": None, "units": "mm"}, + "beam_center_x": {"value": None, "units": "pixel"}, + "beam_center_y": {"value": None, "units": "pixel"}, + "pixel_size": {"value": None, "units": "m"}, + # HKL.DETECTOR_CIRCLE_AXIS + "transformations": { + "NX_class": "NXtransformations", + "axis_2": {"value": None, "type": "rotation", "vector": [0, 1, 0]} + } + }, + # HKL.SPEC ENERGY + "source": { + "name": "source", + "NX_class": "NXsource", + "energy": {"value": None, "units": "keV"} + }, + }, + "sample": { + "name": "sample", + "NX_class": "NXsample", + "field": "rotation_angle", + # HKL.SPEC UB_MATRIX + "ub_matrix": {"value": None, "units": "1/angstrom"}, + "orientation_matrix": {"value": None}, + # HKL Orientation Directions + "surface_normal": {"vector": [0, 0, 1]}, + "inplane_reference": {"vector": [1, 0, 0]} + }, + "data": { + "name": "data", + "NX_class": "NXdata", + "signal": "data", + "axes": "rotation_angle" + } + } + }, + + "format": { + "name":"nexus", + "links":{ + "Nexus": "", + "Scan Standard":"", + "DashPVA":"" + } + } + } +} + +class 
HDF5Handler(QObject): + hdf5_writer_finished = pyqtSignal(str) + def __init__(self, file_path:str="", pva_reader:PVAReader=None, compress:bool=True): + super(HDF5Handler, self).__init__() + self.pva_reader = pva_reader + self.file_path = file_path + self.default_output = Path('~/DashPVA/outputs/scans/demo/nexus_standard_default_format.h5').expanduser() + self.temp_output = Path('~/DashPVA/outputs/scans/demo/temp.h5').expanduser() + self.compress = compress + # Build reverse HKL map from the loaded TOML config (via PVAReader) + self.hkl_reverse_map = {} + if self.pva_reader is not None: + try: + self.hkl_reverse_map = self.parse_toml() + except Exception as e: + print(f"[HDF5Handler] Failed to parse TOML for HKL map: {e}") + + #Loading + def load_data(self): + pass + + @pyqtSlot() + def save_data(self, compress=False, file_path=None, clear_caches=False, is_scan=False): + if file_path is None: + file_path = self.default_output + + self.file_path=file_path + if self.pva_reader is not None: + self.save_from_caches(compress, clear_caches=clear_caches, is_scan=is_scan) + + # Saving + def save_from_caches(self, compress:bool=True, clear_caches:bool=True,is_scan=False): + if is_scan: + self.save_as_scan_format(compress, clear_caches) + else: + self.save_as_default_format(compress, clear_caches) + + def save_as_default_format(self, compress: bool = True, clear_caches: bool = True): + """Save using the same unified structure as utils/metadata_converter.py. + + Layout: + /entry/data/data -> image stack + /entry/data/metadata -> base metadata + /entry/data/metadata/motor_positions -> position PVs + /entry/data/metadata/HKL/... -> hierarchical HKL per config + /entry/data/hkl/qx,qy,qz -> optional caches when present + """ + all_caches = self.pva_reader.get_all_caches(clear_caches=clear_caches) + images = all_caches.get('images') + attributes = all_caches.get('attributes') + rsm = all_caches.get('rsm') + shape = self.pva_reader.get_shape() + + # Align lengths + len_images = len(images or []) + len_attributes = len(attributes or []) + if len_images != len_attributes: + m = min(len_images, len_attributes) + if m > 0: + images = (images or [])[:m] + attributes = (attributes or [])[:m] + len_images = len(images) + len_attributes = len(attributes) + if images is None or len_images == 0: + self.hdf5_writer_finished.emit("Failed to save: Empty image cache") + return + + # Merge metadata across frames: key -> list of values + merged_metadata = {} + for attr in (attributes or []): + for k, v in attr.items(): + if k in ('RSM', 'Analysis'): + continue + merged_metadata.setdefault(k, []).append(v) + + # HKL config from reader + hkl_cfg = getattr(self.pva_reader, 'config', {}).get('HKL', {}) + HKL_IN_CONFIG = bool(hkl_cfg) + + def is_position_pv(pv: str) -> bool: + if not isinstance(pv, str): + return False + return (":Position" in pv) or (".RBV" in pv) or ("_RBV" in pv) + + ds_kwargs = (hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True) if compress else {}) + + with h5py.File(self.file_path, 'w') as h5f: + # /entry/data/data + entry = h5f.create_group('entry') + data_grp = entry.create_group('data') + data_grp.create_dataset('data', data=np.array([np.reshape(img, shape) for img in images]), **ds_kwargs) + + # /entry/data/metadata and motor_positions + metadata_grp = data_grp.create_group('metadata') + motor_pos_grp = metadata_grp.create_group('motor_positions') + for key, values in merged_metadata.items(): + try: + arr = np.array(values) + target = motor_pos_grp if is_position_pv(key) else metadata_grp + if 
arr.dtype.kind in ('i', 'u', 'f'): + target.create_dataset(key, data=arr) + elif arr.dtype.kind in ('U', 'S', 'O'): + dt = h5py.string_dtype(encoding='utf-8') + target.create_dataset(key, data=arr.astype(dt)) + else: + dt = h5py.string_dtype(encoding='utf-8') + target.create_dataset(key, data=str(values), dtype=dt) + except Exception: + pass + + # /entry/data/metadata/HKL ... per config + if HKL_IN_CONFIG: + hkl_root = metadata_grp.create_group('HKL') + + def _write_from_pv(group, name, pv_key): + vals = merged_metadata.get(pv_key) + if vals is None: + return + arr = np.array(vals) + if arr.dtype.kind in ('i', 'u', 'f'): + group.create_dataset(name, data=arr) + elif arr.dtype.kind in ('U', 'S', 'O'): + dt = h5py.string_dtype(encoding='utf-8') + group.create_dataset(name, data=arr.astype(dt)) + else: + dt = h5py.string_dtype(encoding='utf-8') + group.create_dataset(name, data=str(vals), dtype=dt) + + for section_name in ['PRIMARY_BEAM_DIRECTION', 'INPLANE_REFERENCE_DIRECITON', 'SAMPLE_SURFACE_NORMAL_DIRECITON']: + sec = hkl_cfg.get(section_name, {}) + if sec: + sec_grp = hkl_root.create_group(section_name) + for k, pv in sec.items(): + _write_from_pv(sec_grp, k, pv) + + for base in ['SAMPLE_CIRCLE_AXIS_1', 'SAMPLE_CIRCLE_AXIS_2', 'SAMPLE_CIRCLE_AXIS_3', 'SAMPLE_CIRCLE_AXIS_4', 'DETECTOR_CIRCLE_AXIS_1', 'DETECTOR_CIRCLE_AXIS_2']: + sec = hkl_cfg.get(base, {}) + if sec: + grp = hkl_root.create_group(base) + for k, pv in sec.items(): + _write_from_pv(grp, k, pv) + + spec = hkl_cfg.get('SPEC', {}) + if spec: + spec_grp = hkl_root.create_group('SPEC') + ev_key = spec.get('ENERGY_VALUE') + if ev_key: + vals = merged_metadata.get(ev_key) + if vals is not None: + spec_grp.create_dataset('ENERGY_VALUE', data=np.array(vals)) + ub_key = spec.get('UB_MATRIX_VALUE') + if ub_key: + vals = merged_metadata.get(ub_key) + if vals is not None: + arr = np.asarray(vals).ravel() + ub9 = arr[:9] if arr.size >= 9 else arr + spec_grp.create_dataset('UB_MATRIX_VALUE', data=ub9) + + detector = hkl_cfg.get('DETECTOR_SETUP', {}) + if detector: + det_grp = hkl_root.create_group('DETECTOR_SETUP') + for k, pv in detector.items(): + _write_from_pv(det_grp, k, pv) + + # Optional HKL caches + if HKL_IN_CONFIG and rsm: + try: + if len(rsm[0]) == len_images: + hkl_grp = data_grp.create_group('hkl') + hkl_grp.create_dataset('qx', data=np.array([np.reshape(qx, shape) for qx in rsm[0]]), **ds_kwargs) + hkl_grp.create_dataset('qy', data=np.array([np.reshape(qy, shape) for qy in rsm[1]]), **ds_kwargs) + hkl_grp.create_dataset('qz', data=np.array([np.reshape(qz, shape) for qz in rsm[2]]), **ds_kwargs) + except Exception: + pass + + # Auto-convert metadata structure per current TOML before emitting signal + conversion_suffix = "" + try: + toml_path = settings.ensure_path() + if toml_path: + convert_files_or_dir( + toml_path=toml_path, + hdf5_path=str(self.file_path), + base_group="entry/data/metadata", + include=True, + in_place=True, + recursive=False, + ) + conversion_suffix = " (converted)" + else: + conversion_suffix = " (conversion skipped: no TOML path)" + except Exception as conv_err: + conversion_suffix = f" (conversion failed: {conv_err})" + + self.hdf5_writer_finished.emit(f"Saved to: {self.file_path}\nFormat: unified-structure{conversion_suffix}") + + def save_as_scan_format(self, compress:bool=True, clear_caches:bool=True): + all_caches = self.pva_reader.get_all_caches(clear_caches=clear_caches) + images = all_caches['images'] + attributes = all_caches['attributes'] + rsm = all_caches['rsm'] + shape = 
self.pva_reader.get_shape() + + nx_conf = HDF5_STRUCTURE['nexus']['scans'] + formatter = HDF5_STRUCTURE['nexus']['format'] + with h5py.File(self.file_path, 'w') as h5_file: + # Set root + h5_file.attrs['NX_class'] = nx_conf['NX_class'] + h5_file.attrs['default'] = nx_conf['default'] + + # Set entry + entry_cfg = nx_conf['entry'] + entry = h5_file.create_group(entry_cfg['name']) + entry.attrs['NX_class'] = entry_cfg['NX_class'] + entry.attrs['default'] = entry_cfg['default'] + + # Set instruments + instr_cfg = entry_cfg['instrument'] + instr_grp = entry.create_group(instr_cfg['name']) + instr_grp.attrs['NX_class'] = instr_cfg['NX_class'] + + det_cfg = instr_cfg['detector'] + det_grp = instr_grp.create_group(det_cfg['name']) + det_grp.attrs['NX_class'] = det_cfg['NX_class'] + + det_grp.create_dataset(det_cfg['field'], + data=np.array([np.reshape(img, shape) for img in images]), + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True)) + + # Write DETECTOR_SETUP attributes grouped per TOML under instrument/detector/DETECTOR_SETUP + try: + det_setup_cfg = getattr(self.pva_reader, 'config', {}).get('HKL', {}).get('DETECTOR_SETUP', {}) + if isinstance(det_setup_cfg, dict) and attributes: + setup_grp = det_grp.create_group('DETECTOR_SETUP') + for field_name, pv_key in det_setup_cfg.items(): + series = [attr.get(pv_key, None) for attr in attributes] + # Choose dtype based on series content + if all((isinstance(v, (int, float, np.number)) or v is None) for v in series): + numeric_series = [float(v) if v is not None else np.nan for v in series] + setup_grp.create_dataset(str(field_name), data=np.array(numeric_series, dtype=np.float64)) + elif all(isinstance(v, str) or v is None for v in series): + dt = h5py.string_dtype(encoding='utf-8') + str_series = [v if v is not None else '' for v in series] + setup_grp.create_dataset(str(field_name), data=np.array(str_series, dtype=dt)) + else: + dt = h5py.string_dtype(encoding='utf-8') + str_series = [str(v) if v is not None else '' for v in series] + setup_grp.create_dataset(str(field_name), data=np.array(str_series, dtype=dt)) + except Exception as e: + print(f"[HDF5Handler] Failed to write DETECTOR_SETUP attributes: {e}") + + # Source (Energy) + src_cfg = instr_cfg['source'] + src_grp = instr_grp.create_group(src_cfg['name']) + src_grp.attrs['NX_class'] = src_cfg['NX_class'] + if src_cfg['energy']['value'] is not None: + en_ds = src_grp.create_dataset('energy', data=src_cfg['energy']['value']) + en_ds.attrs['units'] = src_cfg['energy']['units'] + + # Set sample -- Defines: ROI's, HKL + sample_cfg = entry_cfg['sample'] + sample_grp = entry.create_group(sample_cfg['name']) + sample_grp.attrs['NX_class'] = sample_cfg['NX_class'] + # Create rotation_angle dataset from motor position PVs + primary_axis_values = [] + if attributes: + pos_keys = [k for k in attributes[0].keys() if 'Position' in k] + for attr in attributes: + v = None + for k in pos_keys: + val = attr.get(k) + if isinstance(val, (int, float, np.number)): + v = float(val) + break + primary_axis_values.append(0.0 if v is None else v) + if primary_axis_values: + rot_ds = sample_grp.create_dataset(sample_cfg['field'], data=np.array(primary_axis_values, dtype=np.float64)) + # Units could be degrees if known; skipping units attr due to lack of config + + # Set data -- Where images and motor_positions are added + data_cfg = entry_cfg['data'] + data_grp = entry.create_group(data_cfg['name']) + data_grp.attrs['NX_class'] = data_cfg['NX_class'] + data_grp.attrs['signal'] = data_cfg['signal'] + 
data_grp.attrs['axes'] = data_cfg['axes'] + + # Crucial for 2D Detectors: Map rotation_angle to the first dimension (index 0) + data_grp.attrs[f"{data_cfg['axes']}_indices"] = 0 + + # Write HKL grouped series under entry/hkl// + hkl_series = self.convert_to_nexus_format() + if hkl_series: + hkl_grp = entry.create_group('hkl') + for grp_name, fields in hkl_series.items(): + subgrp = hkl_grp.create_group(grp_name) + for field_name, series in fields.items(): + if series and isinstance(series[0], str): + dt = h5py.string_dtype(encoding='utf-8') + subgrp.create_dataset(field_name, data=np.array(series, dtype=dt)) + else: + subgrp.create_dataset(field_name, data=np.array(series, dtype=np.float64)) + + data_grp['data'] = h5py.SoftLink(f'/{entry_cfg["name"]}/instrument/detector/data') + data_grp['rotation_angle'] = h5py.SoftLink(f'/{entry_cfg["name"]}/sample/rotation_angle') + self.hdf5_writer_finished.emit(f'\ + Saved to: {self.file_path}\n \ + Format: {formatter["name"]} (conversion skipped for scan-format)\ + ') + + # Info + def get_file_info(self): + pass + + # Parse Toml dict + def parse_toml(self): + """ + Build a reverse HKL map from the loaded TOML configuration. + Returns a dict mapping PV attribute keys -> (group_name, field_name), both lowercased. + + Example: + 'DetectorSetup:Distance' -> ('detector_setup', 'distance') + """ + reverse_map: dict[str, tuple[str, str]] = {} + try: + cfg = getattr(self.pva_reader, 'config', {}) if self.pva_reader is not None else {} + hkl_cfg: dict = cfg.get('HKL', {}) + for group_name, fields in hkl_cfg.items(): + if isinstance(fields, dict): + for field_name, pv_key in fields.items(): + if isinstance(pv_key, str) and pv_key: + reverse_map[pv_key] = (str(group_name).lower(), str(field_name).lower()) + return reverse_map + except Exception as e: + print(f"[HDF5Handler] parse_toml failed: {e}") + return {} + + def get_structured_attr(self, attr): + """ + Group a single frame's attribute dict into structured sections. + + Returns a dict with keys: + - 'hkl': {group: {field: value}} + - 'rois': {roi_name: {dim: value}} + - 'motor_positions': {pv_key: value} # keys containing 'Position' + - 'metadata': {other_key: value} + """ + structured = { + 'hkl': {}, + 'rois': {}, + 'motor_positions': {}, + 'sample_circle_axis_n': {}, + 'detector_circle_axis_n': {}, + 'spec':{}, + } + if not isinstance(attr, dict): + return structured + + # HKL grouping via reverse map + for key, value in attr.items(): + if key in self.hkl_reverse_map: + grp, field = self.hkl_reverse_map[key] + structured['hkl'].setdefault(grp, {})[field] = value + continue + # ROI grouping + if 'ROI' in key: + parts = key.split(':') + if len(parts) >= 3 and parts[1].startswith('ROI'): + roi = parts[1] + dim = parts[2] + structured['rois'].setdefault(roi, {})[dim] = value + continue + # Motor positions (generic) + if 'Position' in key: + structured['motor_positions'][key] = value + continue + # Metadata fallback + structured['metadata'][key] = value + return structured + + # Convert to nexus standard + def convert_to_nexus_format(self): + """ + Prepare HKL grouped series from cached attribute list for writing under entry/hkl//. 
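+        Here '//' stands for the <group>/<field> pair taken from the TOML reverse
+        map (see parse_toml above). An illustrative sketch, assuming one mapped key
+        'DetectorSetup:Distance' observed across three cached frames:
+
+            {'detector_setup': {'distance': [d0, d1, d2]}}
+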
+ Returns: {group: {field: [values...]}} + """ + grouped_series: dict[str, dict[str, list]] = {} + try: + all_caches = self.pva_reader.get_all_caches(clear_caches=False) if self.pva_reader is not None else {'attributes': []} + attributes_list = all_caches.get('attributes', []) + for attr in attributes_list: + # group one frame + grouped = self.get_structured_attr(attr) + for grp, fields in grouped['hkl'].items(): + for field, val in fields.items(): + grouped_series.setdefault(grp, {}).setdefault(field, []).append(val) + return grouped_series + except Exception as e: + print(f"[HDF5Handler] convert_to_nexus_format failed: {e}") + return {} diff --git a/utils/hdf5_loader.py b/utils/hdf5_loader.py new file mode 100644 index 0000000..64fe8c6 --- /dev/null +++ b/utils/hdf5_loader.py @@ -0,0 +1,1566 @@ +import hdf5plugin # Must be imported before h5py to register compression filters +import h5py +import numpy as np +from typing import Tuple, Optional, Union +import os +from pathlib import Path +import traceback +import sys, pathlib +sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) + +class HDF5Loader: + """ + Utility class for loading and saving HDF5 files with 2D and 3D point cloud data + """ + + def __init__(self): + """Initialize the HDF5 loader""" + # ================ FILE HANDLING ======================= # + self.current_file_path = None + self.output_file_location = None + + # ================ DATA STORAGE ======================= # + self.raw_data = None + self.points_2d = None + self.points_3d = None + self.intensities = None + self.images = None + self.volume = None + + # ================ METADATA ======================= # + self.num_images = 0 + self.image_shape = (0, 0) # (height, width) + self.volume_shape = (0, 0, 0) # (depth, height, width) + self.data_type = None # '2d', '3d', 'volume', 'images' + self.file_metadata = {} + + self.hdf5_structure = { + 'entry': 'entry', + 'data': 'entry/data', + 'images': 'entry/data/data', + 'metadata': 'entry/data/metadata', + 'motor_positions': 'entry/data/metadata/motor_positions', + 'rois': 'entry/data/rois', + 'hkl': 'entry/data/hkl', + 'qx': 'entry/data/hkl/qx', + 'qy': 'entry/data/hkl/qy', + 'qz': 'entry/data/hkl/qz', + 'analysis': 'entry/analysis', + 'intensity': 'entry/analysis/intensity', + 'comx': 'entry/analysis/comx', + 'comy': 'entry/analysis/comy' + } + # ================== ERROR ================= # + self.debug_mode = True + self.last_error = '' + self.flatten_intensities = True + self.auto_calculate_bounds = True + self.validate_coordinates = True + self.error_log = [] + self.log_file_path = 'hdf5_loader_errors.txt' + self.coordinates_loaded = False + self.intensities_loaded = False + self.points_assembled = False + + # ================ BASE LOADING METHODS ======================= # + def _load_hdf5_data(self, file_path: str) -> dict: + """ + Base method to load HDF5 file and return raw data structure + + Args: + file_path (str): Path to HDF5 file + + Returns: + dict: Raw data structure from HDF5 file + """ + try: + self.current_file_path = file_path + raw_data = {} + + with h5py.File(file_path, 'r') as f: + # Load all available datasets that we might need + + # Try to load coordinate data (for 3D) + if 'entry/data/hkl/qx' in f: + raw_data['qx'] = f['entry/data/hkl/qx'][:] + if 'entry/data/hkl/qy' in f: + raw_data['qy'] = f['entry/data/hkl/qy'][:] + if 'entry/data/hkl/qz' in f: + raw_data['qz'] = f['entry/data/hkl/qz'][:] + + # Load image data (for both 2D and 3D) + if 'entry/data/data' in f: + data_ds = 
f['entry/data/data'] + arr = data_ds[()] + raw_data['images'] = arr + if arr.ndim == 3: + raw_data['num_images'] = arr.shape[0] + raw_data['image_shape'] = (arr.shape[1], arr.shape[2]) + elif arr.ndim == 2: + raw_data['num_images'] = 1 + raw_data['image_shape'] = (arr.shape[0], arr.shape[1]) + else: + raw_data['num_images'] = 0 + raw_data['image_shape'] = (0, 0) + + # Load any metadata + raw_data['metadata'] = {} + for group_name in ['entry', 'entry/data', 'entry/data/metadata']: + if group_name in f: + for key, value in f[group_name].attrs.items(): + raw_data['metadata'][f"{group_name}_{key}"] = value + + return raw_data + + except Exception as e: + self._handle_loading_error(e, file_path) + return {} + + def _validate_hdf5_structure(self, data: dict) -> bool: + """ + Validate that the HDF5 file has the expected structure + + Args: + data (dict): Raw data from HDF5 file + + Returns: + bool: True if structure is valid + """ + try: + # Check all required data exists + required_keys = ['qx', 'qy', 'qz', 'images'] + for key in required_keys: + if key not in data or data[key] is None: + return False + + # Check shapes match + qx_shape = data['qx'].shape + qy_shape = data['qy'].shape + qz_shape = data['qz'].shape + images_shape = data['images'].shape + + if not (qx_shape == qy_shape == qz_shape == images_shape): + return False + + # Check not empty + if qx_shape[0] == 0: + return False + + # Store validated info + self.num_images = data['num_images'] + self.original_shape = data['image_shape'] + + return True + + except Exception as e: + return False + + def validate_file(self, file_path: str) -> bool: + """ + Basic file validation - works for any HDF5 file + + Args: + file_path (str): Path to HDF5 file + + Returns: + bool: True if file is a valid HDF5 file + """ + try: + # Basic file checks + if not os.path.exists(file_path): + self.last_error = f"File does not exist: {file_path}" + return False + + if not h5py.is_hdf5(file_path): + self.last_error = f"Not a valid HDF5 file: {file_path}" + return False + + # Try to open and check for basic structure + with h5py.File(file_path, 'r') as f: + # Check for at least image data + if 'entry/data/data' not in f: + self.last_error = "Missing required image data path: entry/data/data" + return False + + return True + + except Exception as e: + self.last_error = f"File validation failed: {e}" + print(self.last_error) + return False + + + def _validate_for_3d(self, data: dict) -> bool: + """ + Validate that loaded data can be used for 3D operations + + Args: + data (dict): Raw data from _load_hdf5_data + + Returns: + bool: True if data is valid for 3D operations + """ + try: + # Check for 3D coordinate data + required_3d = ['qx', 'qy', 'qz', 'images'] + for key in required_3d: + if key not in data or data[key] is None: + return False + + # Check shapes match + shapes = [data['qx'].shape, data['qy'].shape, data['qz'].shape, data['images'].shape] + if not all(shape == shapes[0] for shape in shapes): + return False + + # Check arrays are not empty + if data['qx'].size == 0: + return False + + return True + + except Exception as e: + return False + + def _validate_for_2d(self, data: dict) -> bool: + """ + Validate that loaded data can be used for 2D operations + + Args: + data (dict): Raw data from _load_hdf5_data + + Returns: + bool: True if data is valid for 2D operations + """ + # For 2D, we just need image data + if 'images' not in data or data['images'] is None: + return False + + # Check images have proper dimensions + if len(data['images'].shape) != 3: 
# (num_images, height, width) + return False + + return True + + # ================ 2D LOADING METHODS ======================= # + def load_h5_to_2d(self, file_path: str) -> Tuple[np.ndarray, np.ndarray, int, Tuple[int, int]]: + """ + Load HDF5 file to 2D points + + Args: + file_path (str): Path to HDF5 file + + Returns: + Tuple containing: + - points (np.ndarray): 2D points array (N, 2) + - intensities (np.ndarray): Intensity values + - num_images (int): Number of images + - shape (Tuple[int, int]): Image dimensions (height, width) + """ + pass + + def load_h5_images_2d(self, file_path: str) -> Tuple[np.ndarray, int, Tuple[int, int]]: + """ + Load HDF5 file as 2D image stack + + Args: + file_path (str): Path to HDF5 file + + Returns: + Tuple containing: + - images (np.ndarray): Image stack (N, H, W) + - num_images (int): Number of images + - shape (Tuple[int, int]): Image dimensions (height, width) + """ + pass + + # ================ 3D LOADING METHODS ======================= # + def load_h5_to_3d(self, file_path: str) -> Tuple[np.ndarray, np.ndarray, int, Tuple[int, int]]: + """ + Load HDF5 file to 3D points + + Args: + file_path (str): Path to HDF5 file + + Returns: + Tuple containing: + - points (np.ndarray): 3D points array (N, 3) + - intensities (np.ndarray): Intensity values + - num_images (int): Number of images + - shape (Tuple[int, int]): Original image dimensions + """ + try: + + # Validate file + if not self.validate_file(file_path): + raise ValueError(f"Invalid file: {self.last_error}") + + # Load raw data + raw_data = self._load_hdf5_data(file_path) + if not raw_data: + raise ValueError("Failed to load data from file") + + # Validate for 3D operations + if not self._validate_for_3d(raw_data): + raise ValueError("File does not contain valid 3D coordinate data") + + # Process coordinate data + qx_flat = self._flatten_coordinate_data(raw_data['qx']) + qy_flat = self._flatten_coordinate_data(raw_data['qy']) + qz_flat = self._flatten_coordinate_data(raw_data['qz']) + + # Check if flattening worked + if len(qx_flat) == 0 or len(qy_flat) == 0 or len(qz_flat) == 0: + raise ValueError("Coordinate flattening resulted in empty arrays") + + # Create 3D points array + points_3d = np.column_stack([qx_flat, qy_flat, qz_flat]) + + # Process intensity data + if self.flatten_intensities: + intensities = np.reshape(raw_data['images'], -1) + else: + intensities = raw_data['images'] + + # Final validation + if points_3d.size == 0: + raise ValueError("Final 3D points array is empty") + + # Store in class variables + self.points_3d = points_3d + self.intensities = intensities + self.num_images = raw_data['num_images'] + self.original_shape = raw_data['image_shape'] + + return (points_3d, intensities, raw_data['num_images'], raw_data['image_shape']) + + except Exception as e: + self._handle_loading_error(e, file_path) + return (np.array([]), np.array([]), 0, (0, 0)) + + def load_h5_volume_3d(self, file_path: str) -> Tuple[np.ndarray, Tuple[int, int, int]]: + """ + Load HDF5 file as 3D volume (or 2D slice if saved that way) using the standard structure. + Reads: + - /entry/data/data as the array + - /entry attrs (e.g., data_type) + - /entry/data attrs (array_rank, array_shape) + - /entry/data/metadata datasets (voxel_spacing, grid_origin, volume_shape, original_shape, etc.) 
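+
+        Example (a minimal sketch; a 2D slice is reported with a leading 0 in the
+        returned shape, as noted under Returns below):
+
+            loader = HDF5Loader()
+            volume, shape = loader.load_h5_volume_3d('volume.h5')
+            if shape[0] == 0:
+                print('2D slice:', shape[1:])    # (H, W)
+            else:
+                print('3D volume:', shape)       # (D, H, W)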
+ + Args: + file_path (str): Path to HDF5 file + + Returns: + Tuple containing: + - volume (np.ndarray): 3D volume data (D, H, W) or 2D slice (H, W) + - shape (Tuple[int, int, int]): Volume dimensions (or 2D shape with leading 0) + """ + try: + if not os.path.exists(file_path): + raise FileNotFoundError(f"File does not exist: {file_path}") + if not h5py.is_hdf5(file_path): + raise ValueError(f"Not a valid HDF5 file: {file_path}") + + meta: dict = {} + with h5py.File(file_path, 'r') as f: + # Basic path checks + if 'entry' not in f or 'entry/data' not in f or 'entry/data/data' not in f: + raise ValueError("Required HDF5 paths missing (expected /entry/data/data)") + + entry_grp = f['entry'] + data_grp = f['entry/data'] + data_ds = data_grp['data'] + + # Read array and shape + volume = data_ds[()] # numpy array + vol_shape = volume.shape # 3D for volume; 2D for slice + + # Read attributes for quick discovery + try: + meta['data_type'] = entry_grp.attrs.get('data_type', '') + except Exception: + meta['data_type'] = '' + try: + meta['array_rank'] = int(data_grp.attrs.get('array_rank', volume.ndim)) + except Exception: + meta['array_rank'] = volume.ndim + try: + arr_shape_attr = data_grp.attrs.get('array_shape', np.array(vol_shape, dtype=np.int64)) + meta['array_shape'] = list(np.array(arr_shape_attr, dtype=np.int64).tolist()) + except Exception: + meta['array_shape'] = list(vol_shape) + + # Read metadata datasets if present + voxel_spacing = None + grid_origin = None + original_shape = None + volume_shape_ds = None + if 'entry/data/metadata' in f: + md_grp = f['entry/data/metadata'] + for key in md_grp.keys(): + try: + ds = md_grp[key] + # Try to read as string safely, otherwise as numeric/array + if hasattr(ds, 'asstr'): + val = ds.asstr()[()] + else: + val = ds[()] + # Normalize numpy types to python types + if isinstance(val, np.ndarray): + meta[key] = val.tolist() + elif isinstance(val, (np.generic,)): + meta[key] = val.item() + else: + meta[key] = val + except Exception: + # Fall back to stringified content if any issue + try: + meta[key] = str(md_grp[key][()]) + except Exception: + pass + voxel_spacing = meta.get('voxel_spacing', None) + grid_origin = meta.get('grid_origin', None) + original_shape = meta.get('original_shape', None) + volume_shape_ds = meta.get('volume_shape', None) + + # Store in instance for downstream consumers + self.volume = volume + # Always store a 3-tuple for volume_shape; if 2D, prefix with 0 + if volume.ndim == 3: + self.volume_shape = tuple(int(x) for x in vol_shape) + elif volume.ndim == 2: + self.volume_shape = (0, int(vol_shape[0]), int(vol_shape[1])) + else: + self.volume_shape = (0, 0, 0) + # Keep file-level metadata for access by caller + self.file_metadata = { + 'data_type': (meta.get('data_type') or ''), + 'array_rank': meta.get('array_rank'), + 'array_shape': meta.get('array_shape'), + 'voxel_spacing': voxel_spacing, + 'grid_origin': grid_origin, + 'original_shape': original_shape, + 'volume_shape': volume_shape_ds, + 'num_images': meta.get('num_images', None), + # Persisted grid reconstruction hints + 'array_order': meta.get('array_order', None), + 'grid_dimensions_cells': meta.get('grid_dimensions_cells', None), + 'axes_labels': meta.get('axes_labels', None), + 'intensity_range': meta.get('intensity_range', None), + } + + # Return volume and a 3D shape; for 2D, return (0, H, W) so callers can detect + return (volume, self.volume_shape) + except Exception as e: + self._handle_loading_error(e, file_path) + return (np.array([]), (0, 0, 0)) + + def 
load_h5_with_coordinates(self, file_path: str) -> Tuple[np.ndarray, np.ndarray, dict]:
+        """
+        Load HDF5 file with coordinate transformations (qx, qy, qz)
+
+        Args:
+            file_path (str): Path to HDF5 file
+
+        Returns:
+            Tuple containing:
+                - points (np.ndarray): Transformed 3D points
+                - intensities (np.ndarray): Intensity values
+                - metadata (dict): Additional metadata from file
+        """
+        try:
+            # Use the main 3D loader
+            points, intensities, num_images, shape = self.load_h5_to_3d(file_path)
+
+            # Load raw data again to get metadata
+            raw_data = self._load_hdf5_data(file_path)
+
+            # Compute coordinate bounds from the freshly loaded points;
+            # self.bounds_3d is not guaranteed to exist before this call
+            self._calculate_3d_bounds()
+
+            # Prepare metadata dictionary
+            metadata = {
+                'num_images': num_images,
+                'original_shape': shape,
+                'coordinate_bounds': dict(getattr(self, 'bounds_3d', {})),
+                'file_metadata': raw_data.get('metadata', {}),
+                'coordinate_arrays': {
+                    'qx_shape': raw_data['qx'].shape,
+                    'qy_shape': raw_data['qy'].shape,
+                    'qz_shape': raw_data['qz'].shape
+                }
+            }
+
+            return (points, intensities, metadata)
+
+        except Exception as e:
+            self._handle_loading_error(e, file_path)
+            return (np.array([]), np.array([]), {})
+
+    def load_vti_volume_3d(self, file_path: str, scalar_name: Optional[str] = None, prefer_cell_data: bool = True) -> Tuple[np.ndarray, Tuple[int, int, int]]:
+        """
+        Load a .vti (VTK XML ImageData) file and return a cell-centered 3D numpy volume (D, H, W).
+        No saving performed. Populates self.volume, self.volume_shape, and self.file_metadata.
+
+        Args:
+            file_path: Path to the .vti file
+            scalar_name: Optional name of the scalar to use; if None, uses active or first available
+            prefer_cell_data: Prefer cell_data arrays (best for slicing); if False and only point_data exists, will convert
+
+        Returns:
+            (volume, shape): numpy array (D,H,W) and shape tuple
+        """
+        try:
+            import pyvista as pv  # Lazy import to keep dependency optional
+            self.current_file_path = file_path
+
+            if not os.path.exists(file_path):
+                raise FileNotFoundError(f"File does not exist: {file_path}")
+
+            grid = pv.read(file_path)
+
+            # Select scalar from cell_data (preferred) or point_data
+            chosen_name = None
+            intens_1d = None
+            target_obj_for_dims = grid  # object whose dimensions we use
+
+            cell_keys = list(grid.cell_data.keys()) if hasattr(grid, "cell_data") else []
+            point_keys = list(grid.point_data.keys()) if hasattr(grid, "point_data") else []
+
+            def _pick_name(keys: list, active: Optional[str], requested: Optional[str]) -> Optional[str]:
+                if requested and requested in keys:
+                    return requested
+                if active and active in keys:
+                    return active
+                return keys[0] if keys else None
+
+            if prefer_cell_data and cell_keys:
+                chosen_name = _pick_name(cell_keys, getattr(grid, "active_scalars_name", None), scalar_name)
+                if not chosen_name:
+                    raise ValueError("No scalar arrays found in VTI cell_data")
+                intens_1d = np.asarray(grid.cell_data[chosen_name])
+                target_obj_for_dims = grid
+            else:
+                # Try point_data first and convert to cell_data for consistent D×H×W slicing
+                if point_keys:
+                    chosen_name = _pick_name(point_keys, getattr(grid, "active_scalars_name", None), scalar_name)
+                    if not chosen_name:
+                        raise ValueError("No scalar arrays found in VTI point_data")
+                    v_cd = grid.point_data_to_cell_data(pass_point_data=False)
+                    intens_1d = np.asarray(v_cd.cell_data[chosen_name])
+                    target_obj_for_dims = v_cd
+                elif cell_keys:
+                    # Fallback to cell_data if point_data missing
+                    chosen_name = _pick_name(cell_keys, getattr(grid, "active_scalars_name", None), scalar_name)
+                    intens_1d = np.asarray(grid.cell_data[chosen_name])
+                    target_obj_for_dims = grid
+                else:
+                    raise ValueError("No scalar arrays found in VTI (point_data or cell_data)")
+
+            # Derive cell-centered dimensions from points-based dimensions (cells = points - 1)
+            dims_points = tuple(int(d) for d in getattr(target_obj_for_dims, "dimensions", (0, 0, 0)))
+            dims_cells = tuple(max(d - 1, 0) for d in dims_points)
+
+            expected = int(np.prod(dims_cells)) if all(d > 0 for d in dims_cells) else 0
+            if expected <= 0:
+                raise ValueError(f"Invalid VTI dimensions (points={dims_points}, cells={dims_cells})")
+            if intens_1d.size != expected:
+                # Attempt robust conversion via point_data_to_cell_data if mismatch
+                try:
+                    v_cd = target_obj_for_dims.point_data_to_cell_data(pass_point_data=False) if hasattr(target_obj_for_dims, "point_data_to_cell_data") else grid.point_data_to_cell_data(pass_point_data=False)
+                    intens_1d = np.asarray(v_cd.cell_data[chosen_name])
+                    dims_points = tuple(int(d) for d in getattr(v_cd, "dimensions", (0, 0, 0)))
+                    dims_cells = tuple(max(d - 1, 0) for d in dims_points)
+                    expected = int(np.prod(dims_cells)) if all(d > 0 for d in dims_cells) else 0
+                except Exception:
+                    pass
+                if intens_1d.size != expected:
+                    raise ValueError(f"Scalar size {intens_1d.size} does not match expected cells product {expected}")
+
+            # Reshape to D×H×W using Fortran order to match VTK/PyVista layout (x-fastest, z-slowest)
+            volume = intens_1d.reshape(dims_cells, order="F").astype(np.float32)
+
+            # Store in instance
+            self.volume = volume
+            if len(dims_cells) == 3:
+                self.volume_shape = tuple(int(x) for x in dims_cells)  # keep (D, H, W)
+            elif len(dims_cells) == 2:
+                self.volume_shape = (0, int(dims_cells[0]), int(dims_cells[1]))
+            else:
+                self.volume_shape = (0, 0, 0)
+
+            spacing = getattr(grid, "spacing", (1.0, 1.0, 1.0))
+            origin = getattr(grid, "origin", (0.0, 0.0, 0.0))
+            intensity_range = [float(np.min(volume)), float(np.max(volume))] if volume.size > 0 else [0.0, 0.0]
+
+            # Populate metadata to mirror HDF5 loader expectations
+            self.file_metadata = {
+                "data_type": "volume",
+                "array_rank": int(volume.ndim),
+                "array_shape": list(volume.shape),
+                "voxel_spacing": [float(spacing[0]), float(spacing[1]), float(spacing[2])],
+                "grid_origin": [float(origin[0]), float(origin[1]), float(origin[2])],
+                "original_shape": [int(volume.shape[1]), int(volume.shape[2])] if volume.ndim == 3 else ([int(volume.shape[0]), int(volume.shape[1])] if volume.ndim == 2 else [0, 0]),
+                "volume_shape": list(volume.shape) if volume.ndim == 3 else None,
+                "num_images": 1,
+                "array_order": "F",
+                "grid_dimensions_cells": [int(x) for x in dims_cells],
+                "axes_labels": ["H", "K", "L"],
+                "intensity_range": intensity_range,
+                "scalar_name": chosen_name,
+            }
+
+            return (volume, self.volume_shape)
+        except Exception as e:
+            self._handle_loading_error(e, file_path)
+            return (np.array([]), (0, 0, 0))
+
+    def load_volume_auto(self, file_path: str, scalar_name: Optional[str] = None) -> Tuple[np.ndarray, Tuple[int, int, int]]:
+        """
+        Convenience loader that accepts .h5/.hdf5 or .vti and routes to the appropriate method.
+        No saving performed.
+
+        Args:
+            file_path: Path to input file (.h5/.hdf5 or .vti)
+            scalar_name: Optional scalar name for .vti inputs
+
+        Returns:
+            (volume, shape): numpy array and shape tuple
+        """
+        try:
+            ext = str(Path(file_path).suffix).lower()
+            if ext in (".h5", ".hdf5"):
+                return self.load_h5_volume_3d(file_path)
+            elif ext == ".vti":
+                return self.load_vti_volume_3d(file_path, scalar_name=scalar_name)
+            else:
+                raise ValueError(f"Unsupported file extension: {ext}")
+        except Exception as e:
+            self._handle_loading_error(e, file_path)
+            return (np.array([]), (0, 0, 0))
+
+    # ================ HELPER METHODS FOR 3D LOADING ======================= #
+    def _flatten_coordinate_data(self, coord_array: np.ndarray) -> np.ndarray:
+        """
+        Flatten coordinate array from (num_images, height, width) to (N,)
+
+        Args:
+            coord_array (np.ndarray): Coordinate array to flatten
+
+        Returns:
+            np.ndarray: Flattened coordinate array
+        """
+        try:
+            # Concatenate all images into single array
+            flattened_list = []
+            for i in range(coord_array.shape[0]):
+                flattened_list.append(np.reshape(coord_array[i], -1))
+
+            return np.concatenate(flattened_list)
+
+        except Exception as e:
+            if self.debug_mode:
+                print(f"Error flattening coordinate data: {e}")
+            return np.array([])
+
+    def _calculate_3d_bounds(self) -> None:
+        """
+        Calculate and store 3D bounds from loaded points
+        """
+        try:
+            # Lazily create the bounds dict; it is not initialized in __init__
+            if getattr(self, 'bounds_3d', None) is None:
+                self.bounds_3d = {}
+            if self.points_3d is not None and len(self.points_3d) > 0:
+                self.bounds_3d['x_min'] = np.min(self.points_3d[:, 0])
+                self.bounds_3d['x_max'] = np.max(self.points_3d[:, 0])
+                self.bounds_3d['y_min'] = np.min(self.points_3d[:, 1])
+                self.bounds_3d['y_max'] = np.max(self.points_3d[:, 1])
+                self.bounds_3d['z_min'] = np.min(self.points_3d[:, 2])
+                self.bounds_3d['z_max'] = np.max(self.points_3d[:, 2])
+
+        except Exception as e:
+            if self.debug_mode:
+                print(f"Error calculating 3D bounds: {e}")
+
+
+    # ================ SAVING METHODS ======================= #
+    def save_points_to_h5(self, file_path: str, points: np.ndarray, intensities: np.ndarray,
+                          metadata: Optional[dict] = None) -> bool:
+        """
+        Save 3D point cloud data (points and intensities) to HDF5 file
+
+        Args:
+            file_path (str): Output file path
+            points (np.ndarray): Point coordinates
+            intensities (np.ndarray): Intensity values
+            metadata (dict, optional): Additional metadata to save
+
+        Returns:
+            bool: True if save successful
+        """
+        try:
+            # Validate inputs
+            if points is None or points.size == 0:
+                raise ValueError("Points array cannot be empty")
+
+            if intensities is None or intensities.size == 0:
+                raise ValueError("Intensities array cannot be empty")
+
+            if len(points) != len(intensities):
+                raise ValueError("Points and intensities must have the same length")
+
+            # Prepare metadata
+            if metadata is None:
+                metadata = {}
+
+            # Add default metadata
+            default_metadata = {
+                'num_points': len(points),
+                'point_dimensions': points.shape[1],
+                'data_type': 'points',
+                'creation_timestamp': str(np.datetime64('now')),
+                'source_file': getattr(self, 'current_file_path', 'unknown')
+            }
+
+            merged_metadata = {**default_metadata, **metadata}
+
+            with h5py.File(file_path, 'w') as h5f:
+                # Create main structure using hdf5_structure paths
+                print(f'Creating file at: {file_path}')
+
+                # Create entry group
+                entry_grp = h5f.create_group(self.hdf5_structure['entry'])
+
+                # Create data group
+                data_grp = entry_grp.create_group(self.hdf5_structure['data'].split('/')[-1])
+
+                # Save intensities using standard images path
+                data_grp.create_dataset(
+                    self.hdf5_structure['images'].split('/')[-1],
+                    data=intensities.reshape(-1, 1),
dtype=np.float32, + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True) + ) + print('Intensity data written to standard images path') + + # Create HKL subgroup using standard structure + hkl_grp = data_grp.create_group(self.hdf5_structure['hkl'].split('/')[-1]) + + # Save coordinates using standard HKL paths + hkl_grp.create_dataset( + self.hdf5_structure['qx'].split('/')[-1], + data=points[:, 0], + dtype=np.float32, + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True) + ) + hkl_grp.create_dataset( + self.hdf5_structure['qy'].split('/')[-1], + data=points[:, 1], + dtype=np.float32, + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True) + ) + hkl_grp.create_dataset( + self.hdf5_structure['qz'].split('/')[-1], + data=points[:, 2], + dtype=np.float32, + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True) + ) + print('HKL coordinates written to standard paths') + + # Create metadata group using standard structure + metadata_grp = data_grp.create_group(self.hdf5_structure['metadata'].split('/')[-1]) + print('Metadata group created using standard path') + + # Save metadata + for key, value in merged_metadata.items(): + try: + if isinstance(value, (int, float, np.number)): + metadata_grp.create_dataset(key, data=value) + elif isinstance(value, str): + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=value, dtype=dt) + elif isinstance(value, (list, tuple, np.ndarray)): + # Handle arrays/lists + if len(value) > 0: + if all(isinstance(v, (int, float, np.number)) for v in value): + metadata_grp.create_dataset(key, data=np.array(value)) + elif all(isinstance(v, str) for v in value): + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=np.array(value, dtype=dt)) + else: + # Mixed types, convert to string + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=str(value), dtype=dt) + else: + # Convert to string for complex objects + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=str(value), dtype=dt) + except Exception as e: + print(f"Warning: Could not save metadata key '{key}': {e}") + + print('Metadata saved') + + # Add file-level attributes + entry_grp.attrs['data_type'] = '3d_point_cloud' + entry_grp.attrs['num_points'] = len(points) + entry_grp.attrs['point_dimensions'] = points.shape[1] + + # Add data group attributes + data_grp.attrs['coordinate_system'] = 'hkl' + data_grp.attrs['units'] = 'reciprocal_space' + + print(f"3D point cloud successfully saved using standard HDF5 structure") + print(f"Saved {len(points)} points with 3D coordinates") + print(f"Structure paths used:") + print(f" Entry: {self.hdf5_structure['entry']}") + print(f" Data: {self.hdf5_structure['data']}") + print(f" Images: {self.hdf5_structure['images']}") + print(f" HKL: {self.hdf5_structure['hkl']}") + print(f" QX: {self.hdf5_structure['qx']}") + print(f" QY: {self.hdf5_structure['qy']}") + print(f" QZ: {self.hdf5_structure['qz']}") + print(f" Metadata: {self.hdf5_structure['metadata']}") + + return True + + except Exception as e: + error_msg = f"Failed to save point cloud to {file_path}: {e}" + print(error_msg) + self._handle_saving_error(e, file_path) + return False + + + def save_images_to_h5(self, file_path: str, images: np.ndarray, + coordinates: Optional[dict] = None, metadata: Optional[dict] = None) -> bool: + """ + Save image stack to HDF5 file + + Args: + file_path (str): Output file path + images (np.ndarray): Image stack + coordinates (dict, optional): Coordinate 
transformation data (qx, qy, qz) + metadata (dict, optional): Additional metadata + + Returns: + bool: True if save successful + """ + pass + + def save_vol_to_h5(self, file_path: str, volume: np.ndarray, + metadata: Optional[dict] = None) -> bool: + """ + Save 3D volume (or 2D slice) to HDF5 file using standard structure. + Writes the array to /entry/data/data and metadata to /entry/data/metadata. + + Args: + file_path (str): Output file path + volume (np.ndarray): Volume array. Shape should be: + - (D, H, W) for 3D volumes + - (H, W) for 2D slices + metadata (dict, optional): Additional metadata to save. Will be merged + with defaults (including data_type). + Returns: + bool: True if save successful + """ + try: + if volume is None or volume.size == 0: + raise ValueError("Volume array cannot be empty") + if volume.ndim not in (2, 3): + raise ValueError(f"Volume must be 2D or 3D, got ndim={volume.ndim}") + + # Prepare metadata and infer data_type if not provided + meta = {} if metadata is None else dict(metadata) + inferred_type = 'volume' if volume.ndim == 3 else 'slice' + meta.setdefault('data_type', inferred_type) + meta.setdefault('creation_timestamp', str(np.datetime64('now'))) + meta.setdefault('source_file', getattr(self, 'current_file_path', 'unknown')) + if volume.ndim == 3: + meta.setdefault('volume_shape', tuple(int(x) for x in volume.shape)) + else: + meta.setdefault('slice_shape', tuple(int(x) for x in volume.shape)) + + # Create HDF5 structure and write data + with h5py.File(file_path, 'w') as h5f: + # /entry + entry_grp = h5f.create_group(self.hdf5_structure['entry']) + # /entry/data + data_grp = entry_grp.create_group(self.hdf5_structure['data'].split('/')[-1]) + # /entry/data/data -> write as float32 for consistency + data_ds_name = self.hdf5_structure['images'].split('/')[-1] + data_grp.create_dataset(data_ds_name, data=volume.astype(np.float32)) + + # /entry/data/metadata + metadata_grp = data_grp.create_group(self.hdf5_structure['metadata'].split('/')[-1]) + for key, value in meta.items(): + try: + if isinstance(value, (int, float, np.number)): + metadata_grp.create_dataset(key, data=value) + elif isinstance(value, str): + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=value, dtype=dt) + elif isinstance(value, (list, tuple, np.ndarray)): + if len(value) > 0: + if all(isinstance(v, (int, float, np.number)) for v in value): + metadata_grp.create_dataset(key, data=np.array(value)) + elif all(isinstance(v, str) for v in value): + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=np.array(value, dtype=dt)) + else: + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=str(value), dtype=dt) + else: + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=str(value), dtype=dt) + except Exception as e: + print(f"Warning: Could not save metadata key '{key}': {e}") + + # Attributes for quick discovery + entry_grp.attrs['data_type'] = meta.get('data_type', inferred_type) + data_grp.attrs['array_rank'] = volume.ndim + data_grp.attrs['array_shape'] = np.array(volume.shape, dtype=np.int64) + + print(f"{meta.get('data_type', inferred_type).capitalize()} successfully saved to {file_path}") + print(f"Structure paths used:") + print(f" Entry: {self.hdf5_structure['entry']}") + print(f" Data: {self.hdf5_structure['data']}") + print(f" Images: {self.hdf5_structure['images']}") + print(f" Metadata: {self.hdf5_structure['metadata']}") + return True + except Exception as e: 
+ error_msg = f"Failed to save volume to {file_path}: {e}" + print(error_msg) + self._handle_saving_error(e, file_path) + return False + + def extract_slice(self, file_path: str, points: np.ndarray, intensities: np.ndarray, + metadata: Optional[dict] = None, shape: Optional[Tuple[int, int]] = None) -> bool: + """ + Save a 2D slice derived from scattered 3D points into an HDF5 file, keeping the structure + consistent with HDF5Writer.save_caches_to_h5: + - /entry/data/data -> 2D image (H, W) + - /entry/data/hkl/qx,qy,qz -> 2D grids of coordinates (H, W) + - /entry/data/metadata -> slice and provenance metadata + + Args: + file_path: Output HDF5 path + points: (N, 3) slice points in 3D + intensities: (N,) intensity values + metadata: dict containing at least 'slice_normal' and 'slice_origin' if available + shape: desired 2D shape (H, W) for the slice image; if None, inferred + + Returns: + True on success, False otherwise + """ + try: + if points is None or points.size == 0: + raise ValueError("Slice points array cannot be empty") + if intensities is None or intensities.size == 0: + raise ValueError("Slice intensities array cannot be empty") + if points.shape[0] != intensities.shape[0]: + raise ValueError("Points and intensities must have the same number of elements") + if points.shape[1] != 3: + raise ValueError("Points must be 3D (N, 3)") + + meta = {} if metadata is None else dict(metadata) + + # Plane basis from metadata or fallback + n = np.array(meta.get('slice_normal', [0.0, 0.0, 1.0]), dtype=float) + n_norm = np.linalg.norm(n) + if not np.isfinite(n_norm) or n_norm <= 0.0: + n = np.array([0.0, 0.0, 1.0], dtype=float) + else: + n = n / n_norm + o = np.array(meta.get('slice_origin', [0.0, 0.0, 0.0]), dtype=float) + if not np.all(np.isfinite(o)): + # Fallback to centroid of points + o = np.mean(points, axis=0) + + # Choose an axis not parallel to n to build in-plane basis u,v + world_axes = [np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]), np.array([0.0, 0.0, 1.0])] + ref = world_axes[0] + for ax in world_axes: + if abs(float(np.dot(ax, n))) < 0.9: + ref = ax + break + u = np.cross(n, ref) + u_norm = np.linalg.norm(u) + if not np.isfinite(u_norm) or u_norm <= 0.0: + # Fallback if numerical degeneracy + ref = np.array([0.0, 1.0, 0.0]) + u = np.cross(n, ref) + u_norm = np.linalg.norm(u) + if not np.isfinite(u_norm) or u_norm <= 0.0: + # Absolute fallback + u = np.array([1.0, 0.0, 0.0]) + u_norm = 1.0 + u = u / u_norm + v = np.cross(n, u) + v_norm = np.linalg.norm(v) + if not np.isfinite(v_norm) or v_norm <= 0.0: + v = np.array([0.0, 1.0, 0.0]) + + # Project points onto plane coordinates (U,V) + rel = points - o[None, :] + U = rel.dot(u) + V = rel.dot(v) + + # Determine slice image shape (H, W) + preferred_shape = None + # Prefer provided shape + if shape and isinstance(shape, (tuple, list)) and len(shape) == 2 and int(shape[0]) > 0 and int(shape[1]) > 0: + preferred_shape = (int(shape[0]), int(shape[1])) + else: + # Fallback to metadata original_shape if valid + orig = meta.get('original_shape', None) + if isinstance(orig, (tuple, list)) and len(orig) == 2 and int(orig[0]) > 0 and int(orig[1]) > 0: + preferred_shape = (int(orig[0]), int(orig[1])) + # Final fallback to a reasonable default + if preferred_shape is None: + preferred_shape = (512, 512) + H, W = preferred_shape + + # Compute bin edges over U,V extents + U_min, U_max = float(np.min(U)), float(np.max(U)) + V_min, V_max = float(np.min(V)), float(np.max(V)) + # Avoid zero-size ranges + if not np.isfinite(U_min) or not 
np.isfinite(U_max) or U_max == U_min: + U_min, U_max = -0.5, 0.5 + if not np.isfinite(V_min) or not np.isfinite(V_max) or V_max == V_min: + V_min, V_max = -0.5, 0.5 + + # Bin points into HxW grid, averaging intensities per bin + du = (U_max - U_min) / float(W) + dv = (V_max - V_min) / float(H) + # Guard against zero or NaN + if not np.isfinite(du) or du <= 0.0: + du = 1.0 / float(max(W, 1)) + if not np.isfinite(dv) or dv <= 0.0: + dv = 1.0 / float(max(H, 1)) + iu = np.floor((U - U_min) / du).astype(int) + iv = np.floor((V - V_min) / dv).astype(int) + iu = np.clip(iu, 0, W - 1) + iv = np.clip(iv, 0, H - 1) + + image_sum = np.zeros((H, W), dtype=np.float64) + image_cnt = np.zeros((H, W), dtype=np.int64) + # Accumulate + for k in range(points.shape[0]): + image_sum[iv[k], iu[k]] += float(intensities[k]) + image_cnt[iv[k], iu[k]] += 1 + # Average; fill empty bins with 0.0 + image = np.zeros((H, W), dtype=np.float32) + nonzero = image_cnt > 0 + image[nonzero] = (image_sum[nonzero] / image_cnt[nonzero]).astype(np.float32) + image[~nonzero] = 0.0 + + # Build qx,qy,qz grids at bin centers + Uc = U_min + (np.arange(W, dtype=np.float64) + 0.5) * du + Vc = V_min + (np.arange(H, dtype=np.float64) + 0.5) * dv + # Broadcast to HxW + U_grid = np.broadcast_to(Uc[None, :], (H, W)) + V_grid = np.broadcast_to(Vc[:, None], (H, W)) + # q = o + U*u + V*v + qx = (o[0] + U_grid * u[0] + V_grid * v[0]).astype(np.float32) + qy = (o[1] + U_grid * u[1] + V_grid * v[1]).astype(np.float32) + qz = (o[2] + U_grid * u[2] + V_grid * v[2]).astype(np.float32) + + # Prepare metadata consistent with writer + intensity_range = [float(np.min(image)), float(np.max(image))] + meta.setdefault('data_type', 'slice') + meta.setdefault('extraction_timestamp', str(np.datetime64('now'))) + meta.setdefault('num_points', int(points.shape[0])) + meta.setdefault('image_shape', [int(H), int(W)]) + meta.setdefault('slice_normal', [float(n[0]), float(n[1]), float(n[2])]) + meta.setdefault('slice_origin', [float(o[0]), float(o[1]), float(o[2])]) + meta.setdefault('u_axis', [float(u[0]), float(u[1]), float(u[2])]) + meta.setdefault('v_axis', [float(v[0]), float(v[1]), float(v[2])]) + meta.setdefault('u_range', [float(U_min), float(U_max)]) + meta.setdefault('v_range', [float(V_min), float(V_max)]) + meta.setdefault('intensity_range', intensity_range) + + # Write HDF5 file: /entry/data/data, /entry/data/hkl/{qx,qy,qz}, /entry/data/metadata + with h5py.File(file_path, 'w') as h5f: + entry_grp = h5f.create_group(self.hdf5_structure['entry']) + data_grp = entry_grp.create_group(self.hdf5_structure['data'].split('/')[-1]) + # Image data + data_ds_name = self.hdf5_structure['images'].split('/')[-1] + data_grp.create_dataset(data_ds_name, data=image.astype(np.float32), + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True)) + # HKL 2D grids + hkl_grp = data_grp.create_group(self.hdf5_structure['hkl'].split('/')[-1]) + hkl_grp.create_dataset(self.hdf5_structure['qx'].split('/')[-1], data=qx, + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True)) + hkl_grp.create_dataset(self.hdf5_structure['qy'].split('/')[-1], data=qy, + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True)) + hkl_grp.create_dataset(self.hdf5_structure['qz'].split('/')[-1], data=qz, + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True)) + # Metadata + metadata_grp = data_grp.create_group(self.hdf5_structure['metadata'].split('/')[-1]) + for key, value in meta.items(): + try: + if isinstance(value, (int, float, np.number)): + metadata_grp.create_dataset(key, data=value) + 
elif isinstance(value, str): + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=value, dtype=dt) + elif isinstance(value, (list, tuple, np.ndarray)): + arr = np.array(value) + if arr.dtype.kind in ('i', 'u', 'f'): + metadata_grp.create_dataset(key, data=arr) + elif arr.dtype.kind in ('U', 'S', 'O'): + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=arr.astype(dt)) + else: + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=str(value), dtype=dt) + else: + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=str(value), dtype=dt) + except Exception as e: + print(f"Warning: Could not save metadata key '{key}': {e}") + + # Attributes + entry_grp.attrs['data_type'] = 'slice' + data_grp.attrs['array_rank'] = 2 + data_grp.attrs['array_shape'] = np.array([H, W], dtype=np.int64) + + print(f"Slice successfully saved to {file_path} (shape {H}x{W})") + print(f"Structure paths used:") + print(f" Entry: {self.hdf5_structure['entry']}") + print(f" Data: {self.hdf5_structure['data']}") + print(f" Images: {self.hdf5_structure['images']}") + print(f" HKL: {self.hdf5_structure['hkl']} -> qx,qy,qz grids") + print(f" Metadata: {self.hdf5_structure['metadata']}") + return True + except Exception as e: + error_msg = f"Failed to save slice to {file_path}: {e}" + print(error_msg) + self._handle_saving_error(e, file_path) + return False + + # ================ UTILITY METHODS ======================= # + def get_file_info(self, file_path: str, *, style: str = "text", include_unknown: bool = True, + float_precision: int = 6, summarize_datasets: bool = True, raw: bool = False): + """ + Inspect an HDF5 file and return a formatted summary. + + Args: + file_path: Path to HDF5 file + style: "text" (default) for human-readable string, or "dict" for a grouped dict. + include_unknown: Include unrecognized metadata keys under 'other_metadata'. + float_precision: Decimal places for float formatting in text output. + summarize_datasets: If True, highlight /entry/data/data and summarize others; else list all. + raw: If True, return the original raw dict (backward compatibility). + + Returns: + - If raw=True: the original raw dict identical to prior implementation. + - If style="text": a formatted multiline string. + - If style="dict": a grouped dict with sections. 
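+
+        Example (instance and file names are hypothetical; output abridged):
+            loader.get_file_info('dummy/scan.h5')                       # multiline text summary
+            loader.get_file_info('dummy/scan.h5', style='dict')['summary']['data_type']
+            loader.get_file_info('dummy/scan.h5', raw=True)             # legacy raw dict schema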
+ """ + # Step 1: Build the raw info (same schema as before) + info = { + 'valid': False, + 'data_type': '', + 'paths': [], + 'shapes': {}, + 'dtypes': {}, + 'entry_attrs': {}, + 'data_attrs': {}, + 'metadata': {} + } + try: + if not os.path.exists(file_path) or not h5py.is_hdf5(file_path): + return info if raw else self._format_file_info(info, style=style, include_unknown=include_unknown, + float_precision=float_precision, summarize_datasets=summarize_datasets) + + with h5py.File(file_path, 'r') as f: + info['valid'] = True + # entry attrs + if 'entry' in f: + for k, v in f['entry'].attrs.items(): + info['entry_attrs'][k] = v + info['data_type'] = str(info['entry_attrs'].get('data_type', '')) + # data attrs + if 'entry/data' in f: + for k, v in f['entry/data'].attrs.items(): + info['data_attrs'][k] = v + # metadata datasets + if 'entry/data/metadata' in f: + md_grp = f['entry/data/metadata'] + for key in md_grp.keys(): + try: + ds = md_grp[key] + if hasattr(ds, 'asstr'): + val = ds.asstr()[()] + else: + val = ds[()] + if isinstance(val, np.ndarray): + info['metadata'][key] = val.tolist() + elif isinstance(val, (np.generic,)): + info['metadata'][key] = val.item() + else: + info['metadata'][key] = val + except Exception: + pass + # datasets enumeration + def visitor(name, obj): + if isinstance(obj, h5py.Dataset): + p = '/' + name + info['paths'].append(p) + info['shapes'][p] = obj.shape + info['dtypes'][p] = str(obj.dtype) + f.visititems(visitor) + + # Step 2: Return raw if requested + if raw: + return info + + # Step 3: Return formatted output + return self._format_file_info(info, style=style, include_unknown=include_unknown, + float_precision=float_precision, summarize_datasets=summarize_datasets) + + except Exception as e: + # Also log to error file + try: + with open(self.log_file_path, 'a') as f: + f.write("==== HDF5 Inspect Error ====\n") + f.write(f"File Path: {file_path}\n") + f.write(f"Error: {repr(e)}\n") + f.write("Traceback:\n") + f.write(traceback.format_exc()) + f.write("\n") + except Exception: + pass + return info if raw else self._format_file_info(info, style=style, include_unknown=include_unknown, + float_precision=float_precision, summarize_datasets=summarize_datasets) + + # Internal helper to format file info; kept private within this class. 
+ def _format_file_info(self, info: dict, *, style: str = "text", include_unknown: bool = True, + float_precision: int = 6, summarize_datasets: bool = True): + # Helper conversions + def _to_native(x): + try: + if isinstance(x, bytes): + return x.decode('utf-8', errors='ignore') + if isinstance(x, (np.generic,)): + return x.item() + if isinstance(x, np.ndarray): + return [_to_native(v) for v in x.tolist()] + if isinstance(x, (list, tuple)): + return [_to_native(v) for v in x] + return x + except Exception: + return x + + def _fmt_num(v): + try: + fv = float(v) + return f"{fv:.{int(float_precision)}f}" + except Exception: + return str(v) + + def _fmt_value(v): + v = _to_native(v) + if isinstance(v, float): + return _fmt_num(v) + if isinstance(v, (list, tuple)): + return "[" + ", ".join(_fmt_value(x) for x in v) + "]" + return str(v) + + # Extract common fields + md = {k: _to_native(v) for k, v in (info.get('metadata') or {}).items()} + entry_attrs = {k: _to_native(v) for k, v in (info.get('entry_attrs') or {}).items()} + data_attrs = {k: _to_native(v) for k, v in (info.get('data_attrs') or {}).items()} + + # Derive summary + array_rank = data_attrs.get('array_rank', None) + array_shape = data_attrs.get('array_shape', None) + if isinstance(array_shape, np.ndarray): + array_shape = array_shape.tolist() + # grid/volume keys of interest + voxel_spacing = md.get('voxel_spacing', None) + grid_origin = md.get('grid_origin', None) + grid_cells = md.get('grid_dimensions_cells', None) + grid_points = None + try: + if isinstance(grid_cells, (list, tuple)) and len(grid_cells) == 3: + grid_points = [int(grid_cells[0]) + 1, int(grid_cells[1]) + 1, int(grid_cells[2]) + 1] + except Exception: + grid_points = None + array_order = md.get('array_order', None) + axes_labels = md.get('axes_labels', None) + volume_shape = md.get('volume_shape', None) + original_shape = md.get('original_shape', None) + intensity_range = md.get('intensity_range', None) + num_images = md.get('num_images', None) + + # Build datasets summary + datasets = {} + paths = info.get('paths') or [] + shapes = info.get('shapes') or {} + dtypes = info.get('dtypes') or {} + primary_path = '/entry/data/data' + if primary_path in paths: + datasets[primary_path] = { + 'shape': shapes.get(primary_path), + 'dtype': dtypes.get(primary_path) + } + if not summarize_datasets: + for p in paths: + if p not in datasets: + datasets[p] = {'shape': shapes.get(p), 'dtype': dtypes.get(p)} + else: + # summarize count of others + others = [p for p in paths if p != primary_path] + if others: + datasets['(other_datasets)'] = {'count': len(others)} + + # Determine recognized metadata keys to filter "other_metadata" + recognized = { + 'voxel_spacing', 'grid_origin', 'grid_dimensions_cells', 'array_order', + 'axes_labels', 'volume_shape', 'original_shape', 'intensity_range', 'num_images', + 'array_rank', 'array_shape' + } + other_metadata = {} + if include_unknown: + for k, v in md.items(): + if k not in recognized: + other_metadata[k] = v + + # Grouped dict + grouped = { + 'summary': { + 'valid': bool(info.get('valid', False)), + 'data_type': info.get('data_type', ''), + 'array_rank': array_rank, + 'array_shape': array_shape + }, + 'grid': { + 'voxel_spacing': voxel_spacing, + 'grid_origin': grid_origin, + 'grid_dimensions_cells': grid_cells, + 'grid_dimensions_points': grid_points, + 'array_order': array_order, + 'axes_labels': axes_labels + }, + 'volume': { + 'volume_shape': volume_shape, + 'original_shape': original_shape, + 'intensity_range': intensity_range, + 
'num_images': num_images
+            },
+            'datasets': datasets,
+            'entry_attrs': entry_attrs,
+            'data_attrs': data_attrs,
+            'other_metadata': other_metadata if include_unknown else {}
+        }
+
+        if style == "dict":
+            return grouped
+
+        # Default: style == "text"
+        lines = []
+        def _section(title):
+            lines.append(f"{title}:")
+        def _kv(label, value):
+            if value is None or value == {} or value == []:
+                return
+            lines.append(f" - {label}: {_fmt_value(value)}")
+
+        _section("Summary")
+        _kv("Valid", grouped['summary'].get('valid'))
+        _kv("Data Type", grouped['summary'].get('data_type'))
+        _kv("Array Rank", grouped['summary'].get('array_rank'))
+        _kv("Array Shape", grouped['summary'].get('array_shape'))
+
+        _section("Grid")
+        _kv("Voxel Spacing (ΔH, ΔK, ΔL)", grouped['grid'].get('voxel_spacing'))
+        _kv("Grid Origin (H0, K0, L0)", grouped['grid'].get('grid_origin'))
+        _kv("Grid Dimensions (cells)", grouped['grid'].get('grid_dimensions_cells'))
+        _kv("Grid Dimensions (points)", grouped['grid'].get('grid_dimensions_points'))
+        _kv("Array Order", grouped['grid'].get('array_order'))
+        _kv("Axes Labels", grouped['grid'].get('axes_labels'))
+
+        _section("Volume/Image")
+        _kv("Volume Shape (D,H,W)", grouped['volume'].get('volume_shape'))
+        _kv("Original Shape (H,W)", grouped['volume'].get('original_shape'))
+        _kv("Intensity Range", grouped['volume'].get('intensity_range'))
+        _kv("Num Images", grouped['volume'].get('num_images'))
+
+        _section("Datasets")
+        if primary_path in datasets:
+            ds = datasets[primary_path]
+            _kv(f"{primary_path} shape", ds.get('shape'))
+            _kv(f"{primary_path} dtype", ds.get('dtype'))
+        if '(other_datasets)' in datasets:
+            _kv("Other datasets count", datasets['(other_datasets)'].get('count'))
+        elif not summarize_datasets:
+            for p, ds in datasets.items():
+                if p == primary_path:
+                    continue
+                _kv(f"{p} shape", ds.get('shape'))
+                _kv(f"{p} dtype", ds.get('dtype'))
+
+        _section("Entry Attributes")
+        for k, v in grouped['entry_attrs'].items():
+            _kv(k, v)
+
+        _section("Data Attributes")
+        for k, v in grouped['data_attrs'].items():
+            _kv(k, v)
+
+        if include_unknown and grouped.get('other_metadata'):
+            _section("Other Metadata")
+            for k, v in grouped['other_metadata'].items():
+                _kv(k, v)
+
+        return "\n".join(lines)
+
+    def convert_2d_to_3d(self, points_2d: np.ndarray, intensities: np.ndarray,
+                         z_values: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
+        """
+        Convert 2D points to 3D by adding Z dimension
+
+        Args:
+            points_2d (np.ndarray): 2D points (N, 2)
+            intensities (np.ndarray): Intensity values
+            z_values (np.ndarray, optional): Z coordinates, defaults to zeros
+
+        Returns:
+            Tuple containing:
+            - points_3d (np.ndarray): 3D points (N, 3)
+            - intensities (np.ndarray): Intensity values
+        """
+        if z_values is None:
+            z_values = np.zeros(len(points_2d))
+        # Append Z as a third column; intensities pass through unchanged
+        points_3d = np.column_stack((points_2d, z_values))
+        return points_3d, intensities
+
+    def extract_slice_from_3d(self, points_3d: np.ndarray, intensities: np.ndarray,
+                              slice_axis: str = 'z', slice_value: float = 0.0,
+                              tolerance: float = 0.1) -> Tuple[np.ndarray, np.ndarray]:
+        """
+        Extract 2D slice from 3D point cloud
+
+        Args:
+            points_3d (np.ndarray): 3D points (N, 3)
+            intensities (np.ndarray): Intensity values
+            slice_axis (str): Axis to slice along ('x', 'y', 'z')
+            slice_value (float): Value along axis to slice at
+            tolerance (float): Tolerance for slice selection
+
+        Returns:
+            Tuple containing:
+            - points_2d (np.ndarray): 2D points from slice
+            - intensities_2d (np.ndarray): Corresponding intensities
+        """
+        axis_index = {'x': 0, 'y': 1, 'z': 2}.get(slice_axis.lower())
+        if axis_index is None:
+            raise ValueError(f"slice_axis must be 'x', 'y', or 'z', got '{slice_axis}'")
+        # Keep points whose coordinate along the slice axis lies within the tolerance band
+        mask = np.abs(points_3d[:, axis_index] - slice_value) <= tolerance
+        remaining_axes = [i for i in range(3) if i != axis_index]
+        return points_3d[mask][:, remaining_axes], intensities[mask]
+
+    def get_last_error(self) -> str:
+        """Return the last recorded error message."""
+        return self.last_error
+    # ================ ERROR HANDLING ======================= #
+
+    def _handle_loading_error(self, error: Exception, file_path: str) -> None:
+        """
+        Handle errors during loading operations
+
+        Args:
+            error (Exception): The exception that occurred
+            file_path (str): Path to file that caused error
+        """
+        # Record the error so get_last_error() reflects this failure, then log it
+        self.last_error = f'Error Loading HDF5 File: {str(error)}'
+        with open('error_output.txt', 'w') as f:
+            f.write(f'{self.last_error}\nHDF5 File Path: {file_path}\n\nTraceback:\n{traceback.format_exc()}')
+        print(self.last_error)
+
+
+    def _handle_saving_error(self, error: Exception, file_path: str) -> None:
+        """
+        Handle errors during saving operations
+
+        Args:
+            error (Exception): The exception that occurred
+            file_path (str): Path to file that caused error
+        """
+        self.last_error = f'Error Saving HDF5 File: {str(error)}'
+        with open('error_output.txt', 'w') as f:
+            f.write(f'{self.last_error}\nHDF5 File Path: {file_path}\n\nTraceback:\n{traceback.format_exc()}')
+        print(self.last_error)
+
+# ================= HKL METADATA DISCOVERY HELPER ======================= #
+
+def discover_hkl_axis_labels(file_path: str) -> dict:
+    """
+    Discover friendly HKL axis/motor labels from /entry/data/metadata/HKL only.
+
+    New format:
+    - Read NAME datasets under SAMPLE_CIRCLE_AXIS_1..4 and DETECTOR_CIRCLE_AXIS_1..2
+    Legacy format:
+    - If NAME is not present but MU/ETA/CHI/PHI/NU/DELTA subgroups exist, use those identifiers
+
+    Returns a dict with keys:
+    - present: bool (True if HKL metadata group exists)
+    - sample_axes: list of strings from NAME datasets (new format)
+    - detector_axes: list of strings from NAME datasets (new format)
+    - legacy_sample: list of legacy identifiers (MU/ETA/CHI/PHI) if found
+    - legacy_detector: list of legacy identifiers (NU/DELTA) if found
+    - display_text: single-line summary suitable for UI overlays
+    """
+    labels = {
+        'present': False,
+        'sample_axes': [],
+        'detector_axes': [],
+        'legacy_sample': [],
+        'legacy_detector': [],
+        'display_text': 'HKL metadata: -'
+    }
+    try:
+        if not os.path.exists(file_path) or not h5py.is_hdf5(file_path):
+            return labels
+        with h5py.File(file_path, 'r') as f:
+            base = '/entry/data/metadata/HKL'
+            if base not in f:
+                return labels
+            grp = f[base]
+            labels['present'] = True
+
+            def _read_name_from(subgrp) -> Optional[str]:
+                if not isinstance(subgrp, h5py.Group):
+                    return None
+                ds = subgrp.get('NAME')
+                if not isinstance(ds, h5py.Dataset):
+                    return None
+                try:
+                    val = ds.asstr()[()] if hasattr(ds, 'asstr') else ds[()]
+                except Exception:
+                    return None
+                # Normalize to str
+                if isinstance(val, (bytes, np.bytes_)):
+                    try:
+                        val = val.decode('utf-8', errors='ignore')
+                    except Exception:
+                        val = str(val)
+                return str(val)
+
+            # New format axes discovery
+            for i in range(1, 5):
+                nm = _read_name_from(grp.get(f'SAMPLE_CIRCLE_AXIS_{i}'))
+                if nm:
+                    labels['sample_axes'].append(nm)
+            for i in range(1, 3):
+                nm = _read_name_from(grp.get(f'DETECTOR_CIRCLE_AXIS_{i}'))
+                if nm:
+                    labels['detector_axes'].append(nm)
+
+            # Legacy fallback when NAME not present
+            for key in ('MU', 'ETA', 'CHI', 'PHI'):
+                if key in grp and isinstance(grp[key], h5py.Group):
+                    labels['legacy_sample'].append(key)
+            for key in ('NU', 'DELTA'):
+                if key in grp and isinstance(grp[key], h5py.Group):
+                    labels['legacy_detector'].append(key)
+
+            # Build display text prioritizing new format
+            def fmt_list(lst):
+                return ', '.join([str(x) for x in lst]) if lst else '-'
+
+            sample = labels['sample_axes'] if labels['sample_axes'] else labels['legacy_sample']
+            detector = labels['detector_axes'] if labels['detector_axes'] else labels['legacy_detector']
+            labels['display_text'] = f"Sample motors: {fmt_list(sample)} | Detector motors: {fmt_list(detector)}"
+    except Exception:
+        # Best-effort: return what we have
+        pass
+    return labels
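+
+# Minimal manual check for the discovery helper above (illustrative sketch; the
+# default path is a placeholder and should point at a real scan file):
+if __name__ == '__main__':
+    import sys
+    scan_path = sys.argv[1] if len(sys.argv) > 1 else 'dummy/scan.h5'
+    print(discover_hkl_axis_labels(scan_path)['display_text'])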
f"Sample motors: {fmt_list(sample)} | Detector motors: {fmt_list(detector)}" + except Exception: + # Best-effort: return what we have + pass + return labels diff --git a/utils/hdf5_writer.py b/utils/hdf5_writer.py new file mode 100644 index 0000000..2739605 --- /dev/null +++ b/utils/hdf5_writer.py @@ -0,0 +1,187 @@ +import h5py +import numpy as np +from pathlib import Path +from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot +import hdf5plugin +from utils.metadata_converter import convert_files_or_dir +import settings +# removed traceback import +# from utils import PVAReader + +class HDF5Writer(QObject): + hdf5_writer_finished = pyqtSignal(str) + + def __init__(self, file_path: str, pva_reader): + super(HDF5Writer, self).__init__() + self.file_path = file_path + self.pva_reader = pva_reader + self.default_output_file_config = {'FilePath': 'SCAN_OUTPUT.h5'} + + @pyqtSlot() + def save_caches_to_h5(self, clear_caches:bool=True, compress=False) -> None: + # TODO: add analysis + """ + Saves available caches (images and HKL data) to an HDF5 file under a branch structure. + The file structure is as follows: + /entry/data --> The image cache array + /entry/rois/ROI1-4 + /entry/metadata/motor_positions + /entry/analysis/intensity + /entry/analysis/comx + /entry/analysis/comy + /entry/HKL/qx --> The qx cache array (if available) + /entry/HKL/qy --> The qy cache array (if available) + /entry/HKL/qz --> The qz cache array (if available) + + Args: + filename (str): The output HDF5 file name. + """ + OUTPUT_FILE_LOCATION = "UNKNOWN_FILE" # Initialize to avoid UnboundLocalError + try: + config = self.pva_reader.get_config_settings() + OUTPUT_FILE_CONFIG= config.get('OUTPUT_FILE_CONFIG', self.default_output_file_config) + HKL_IN_CONFIG = config.get('HKL_IN_CONFIG', False) + all_caches = self.pva_reader.get_all_caches(clear_caches=clear_caches) + images = all_caches['images'] + attributes = all_caches['attributes'] + rsm = all_caches['rsm'] + shape = self.pva_reader.get_shape() + + len_images = len(images) + len_attributes = len(attributes) + print("Len of attr then images", len_attributes, len_images) + + if len(OUTPUT_FILE_CONFIG) == 2: + file_path = Path('~/hdf5/').expanduser() + if not file_path.exists(): + file_path.mkdir(parents=True) + file_name = Path('temp_hkl_3d.h5') + TEMP_FILE_LOCATION = file_path.joinpath(file_name) + + file_path = Path(OUTPUT_FILE_CONFIG['FilePath']).expanduser() + if not file_path.exists(): + file_path.mkdir(parents=True) + file_name = Path(OUTPUT_FILE_CONFIG['FileName']) + OUTPUT_FILE_LOCATION = file_path.joinpath(file_name) + else: + OUTPUT_FILE_LOCATION = Path(OUTPUT_FILE_CONFIG['FilePath']).expanduser() + if not OUTPUT_FILE_LOCATION.parent.exists(): + OUTPUT_FILE_LOCATION.parent.mkdir(parents=True, exist_ok=True) # ensures directory exists before writing any files. + + if len_images != len_attributes: + + min_length = min(len_images, len_attributes) + if min_length > 0: + images = images[:min_length] + attributes = attributes[:min_length] + len_images = len(images) + len_attributes = len(attributes) + else: + raise ValueError(f"[Saving Caches] Cannot fix cache mismatch - both caches would be empty. 
Images: {len_images}, Attributes: {len_attributes}") + if images is None or len_images == 0: + raise ValueError("[Saving Caches] Caches cannot be empty.") + + merged_metadata = {} + for attribute_dict in attributes: + for key, value in attribute_dict.items(): + if not (key == 'RSM' or key == 'Analysis'): + # if type(value) == list: + # value = np.asarray(value) + if key not in merged_metadata: + merged_metadata[key] = [] + merged_metadata[key].append(value) + else: + merged_metadata[key].append(value) + + with h5py.File(OUTPUT_FILE_LOCATION, 'w') as h5f: + # Create the main "images" group + images_grp = h5f.create_group("entry") + data_grp = images_grp.create_group('data') + data_grp.create_dataset("data", data=np.array([np.reshape(img, shape) for img in images]), + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True)) + metadata_grp = data_grp.create_group("metadata") + motor_pos_grp = metadata_grp.create_group('motor_positions') + rois_grp = data_grp.create_group('rois') + for key, values in merged_metadata.items(): + if all(isinstance(v, (int, float, np.number)) for v in values): + if 'ROI' in key: + parts = key.split(':') + roi = parts[1] + if roi not in rois_grp.keys(): + rois_grp.create_group(name=roi) + rois_grp[roi].create_dataset(key, data=np.array(values)) + elif 'Position' in key: + motor_pos_grp.create_dataset(key, data=np.array(values)) + else: + metadata_grp.create_dataset(key, data=np.array(values)) + elif all(isinstance(v, str) for v in values): + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=np.array(values, dtype=dt)) + else: + metadata_grp.create_dataset(key, data=np.array(np.reshape(values, -1))) + # Create HKL subgroup under images if HKL caches exist + + if not compress: + with h5py.File(TEMP_FILE_LOCATION, 'w') as h5f: + # Create the main "images" group + images_grp = h5f.create_group("entry") + data_grp = images_grp.create_group('data') + data_grp.create_dataset("data", data=np.array([np.reshape(img, shape) for img in images]), + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True)) + metadata_grp = data_grp.create_group("metadata") + motor_pos_grp = metadata_grp.create_group('motor_positions') + rois_grp = data_grp.create_group('rois') + for key, values in merged_metadata.items(): + if all(isinstance(v, (int, float, np.number)) for v in values): + if 'ROI' in key: + parts = key.split(':') + roi = parts[1] + if roi not in rois_grp.keys(): + rois_grp.create_group(name=roi) + rois_grp[roi].create_dataset(key, data=np.array(values)) + elif 'Position' in key: + motor_pos_grp.create_dataset(key, data=np.array(values)) + else: + metadata_grp.create_dataset(key, data=np.array(values)) + elif all(isinstance(v, str) for v in values): + dt = h5py.string_dtype(encoding='utf-8') + metadata_grp.create_dataset(key, data=np.array(values, dtype=dt)) + else: + metadata_grp.create_dataset(key, data=np.array(np.reshape(values, -1))) + + if HKL_IN_CONFIG and rsm: + len_rsm = len(rsm[0]) + if rsm: + if not (len_rsm == len_images): + raise ValueError("[Saving Caches] qx, qy, and qz caches must have the same number of elements.") + hkl_grp = data_grp.create_group(name="hkl") + hkl_grp.create_dataset("qx", data=np.array([np.reshape(qx, shape) for qx in rsm[0]]), + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True)) + hkl_grp.create_dataset("qy", data=np.array([np.reshape(qy, shape) for qy in rsm[1]]), + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True)) + hkl_grp.create_dataset("qz", data=np.array([np.reshape(qz, shape) for qz in 
rsm[2]]), + **hdf5plugin.Blosc(cname='lz4', clevel=5, shuffle=True)) + # removed debug prints: (TEMP) qx/qy/qz writes and finished HKL datasets + + # Auto-convert metadata structure per current TOML before emitting signal + conversion_suffix = "" + try: + toml_path = settings.ensure_path() + if toml_path: + convert_files_or_dir( + toml_path=toml_path, + hdf5_path=str(OUTPUT_FILE_LOCATION), + base_group="entry/data/metadata", + include=True, + in_place=True, + recursive=False, + ) + conversion_suffix = " (converted)" + else: + conversion_suffix = " (conversion skipped: no TOML path)" + except Exception as conv_err: + conversion_suffix = f" (conversion failed: {conv_err})" + + self.hdf5_writer_finished.emit(f"{len_images} successfully saved to {OUTPUT_FILE_LOCATION}{conversion_suffix}") + except Exception as e: + self.hdf5_writer_finished.emit(f"Failed to save caches to {OUTPUT_FILE_LOCATION}: {e}") diff --git a/utils/metadata_converter.py b/utils/metadata_converter.py new file mode 100644 index 0000000..8346f5a --- /dev/null +++ b/utils/metadata_converter.py @@ -0,0 +1,286 @@ +import os +import traceback +import logging +from pathlib import Path +from typing import List +import shutil + +import h5py +import numpy as np +import toml + +logger = logging.getLogger(__name__) +log_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'logs') +try: + os.makedirs(log_dir, exist_ok=True) + fh = logging.FileHandler(os.path.join(log_dir, 'metadata_writer_errors.log')) + fh.setLevel(logging.ERROR) + fh.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')) + if not any(isinstance(h, logging.FileHandler) and getattr(h, 'baseFilename', '') == fh.baseFilename for h in logger.handlers): + logger.addHandler(fh) + logger.setLevel(logging.ERROR) + logger.propagate = False +except Exception: + pass + + +def is_numeric(value): + return isinstance(value, (int, float, bool, np.number)) + + +def is_position_pv(pv: str) -> bool: + if not isinstance(pv, str): + return False + return (":Position" in pv) or (".RBV" in pv) or ("_RBV" in pv) + + +def _ensure_parent_group(h5_file: h5py.File, full_path: str): + parent_path = "/".join(full_path.split("/")[:-1]) + if parent_path: + h5_file.require_group(parent_path) + + +def _find_dataset_path_by_name(h5_file: h5py.File, base_group_path: str, dataset_name: str): + """ + Walk under base_group_path and return the full path of the first dataset whose leaf name equals dataset_name. + """ + if base_group_path not in h5_file: + return None + + found_path = None + + def visitor(name, obj): + nonlocal found_path + if found_path is not None: + return + try: + if isinstance(obj, h5py.Dataset): + leaf = name.split('/')[-1] + if leaf == dataset_name: + found_path = f"{base_group_path}/{name}" if not name.startswith(base_group_path) else name + except Exception: + pass + + h5_file[base_group_path].visititems(visitor) + return found_path + + +def resolve_pv_dataset(h5_file: h5py.File, pv: str, base_group: str = "entry/data/metadata"): + """ + Resolve a PV string to an existing dataset path inside the saved HDF5 file using known locations. + - Position PVs -> {base_group}/motor_positions/{pv} + - Other PVs -> {base_group}/{pv} + - Fallback: search under base_group for a dataset whose leaf name == pv + Returns (resolved_path, dataset_obj) or (None, None) if not found. 
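+
+    Example (PV and file names are hypothetical):
+        with h5py.File('scan.h5', 'r+') as h5f:
+            path, ds = resolve_pv_dataset(h5f, 'motor1:Position')
+            # -> ('entry/data/metadata/motor_positions/motor1:Position', <HDF5 dataset>)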
+ """ + if not isinstance(pv, str): + return None, None + + candidates = [] + if is_position_pv(pv): + candidates.append(f"{base_group}/motor_positions/{pv}") + candidates.append(f"{base_group}/{pv}") + + for cand in candidates: + if cand in h5_file and isinstance(h5_file[cand], h5py.Dataset): + return cand, h5_file[cand] + + found = _find_dataset_path_by_name(h5_file, base_group, pv) + if found and found in h5_file and isinstance(h5_file[found], h5py.Dataset): + return found, h5_file[found] + + return None, None + + +def copy_dataset_like(h5_file: h5py.File, source: h5py.Dataset, target_path: str): + """ + Copy real array data to target_path, duplicating shape/dtype and attempting to carry storage settings. + If target exists, delete it first. + """ + if target_path in h5_file: + del h5_file[target_path] + + _ensure_parent_group(h5_file, target_path) + + try: + dset = h5_file.create_dataset_like(target_path, source, shape=source.shape, dtype=source.dtype) + dset[...] = source[...] + return True + except Exception: + try: + h5_file.create_dataset(target_path, data=source[()]) + return True + except Exception: + return False + + +def _build_axis_lookup(mapping: dict): + axis_lookup = {} + if 'METADATA' in mapping and 'CA' in mapping['METADATA']: + for axis_label, pv_string in mapping['METADATA']['CA'].items(): + if isinstance(pv_string, str) and ':' in pv_string: + motor_id = pv_string.split(':')[1].split('.')[0].split('_')[0] + axis_lookup[motor_id] = axis_label.upper() + return axis_lookup + + +def _process_structure(h5_file: h5py.File, current_path: str, mapping_dict: dict, axis_lookup: dict, stats: dict, base_group: str, include: bool): + for key, value in mapping_dict.items(): + new_path = f"{current_path}/{key}" + + if isinstance(value, dict): + h5_file.require_group(new_path) + _process_structure(h5_file, new_path, value, axis_lookup, stats, base_group, include) + continue + + try: + # Write NAME labels when possible + if isinstance(value, str) and ':' in value: + parts = value.split(':') + if len(parts) > 1: + motor_id = parts[1].split('_')[0].split('.')[0] + axis_name = axis_lookup.get(motor_id) + parent_path = "/".join(new_path.split('/')[:-1]) + name_path = f"{parent_path}/NAME" + if axis_name and name_path not in h5_file: + h5_file.create_dataset(name_path, data=axis_name) + stats.setdefault("names", 0) + stats["names"] += 1 + + if not include: + # Only build hierarchy and NAME labels + continue + + if isinstance(value, str): + resolved_path, source_node = resolve_pv_dataset(h5_file, value, base_group) + if source_node is not None: + # Special handling for UB matrix: store first 9 values as a flat array + if new_path.endswith("HKL/SPEC/UB_MATRIX_VALUE"): + try: + raw = np.asarray(source_node[...]).ravel() + if raw.size < 9: + stats["warnings"] += 1 + logger.error(f"UB source at '{resolved_path}' has only {raw.size} elements; writing as-is to '{new_path}'. 
Downstream may fail.") + ub9 = raw + else: + ub9 = raw[:9] + if new_path in h5_file: + del h5_file[new_path] + _ensure_parent_group(h5_file, new_path) + h5_file.create_dataset(new_path, data=ub9) + stats["created"] += 1 + continue + except Exception: + stats["warnings"] += 1 + logger.exception(f"Failed to write UB matrix to '{new_path}' from '{resolved_path}'") + # Generic copy for other datasets + ok = copy_dataset_like(h5_file, source_node, new_path) + if ok: + stats["created"] += 1 + else: + stats["warnings"] += 1 + continue + else: + stats["warnings"] += 1 + continue + + if is_numeric(value): + if new_path in h5_file: + del h5_file[new_path] + _ensure_parent_group(h5_file, new_path) + h5_file.create_dataset(new_path, data=value) + stats["constants"] += 1 + continue + + stats["warnings"] += 1 + + except Exception: + stats["warnings"] += 1 + logger.exception(f"Error processing key '{key}' at path '{new_path}' with value '{value}'") + + +def _convert_single_file(src_file: Path, toml_path: Path, base_group: str, include: bool, in_place: bool, output_dir: Path, dry_run: bool) -> str: + mapping = toml.load(str(toml_path)) + axis_lookup = _build_axis_lookup(mapping) + + # Determine destination file path + if in_place: + dst = src_file + else: + output_dir.mkdir(parents=True, exist_ok=True) + dst = output_dir.joinpath(f"{src_file.stem}_meta_update.h5") + + if dry_run: + return str(dst) + + # If writing to a copy, duplicate file bytes first + if not in_place: + shutil.copy2(src_file, dst) + + stats = {"created": 0, "constants": 0, "warnings": 0} + + with h5py.File(str(dst), 'r+') as h5_file: + # Ensure base group exists + h5_file.require_group(base_group) + _process_structure(h5_file, base_group + "/HKL", mapping.get('HKL', mapping), axis_lookup, stats, base_group, include) + + return str(dst) + + +def convert_files_or_dir( + toml_path: str, + hdf5_path: str, + base_group: str = "entry/data/metadata", + include: bool = False, + in_place: bool = False, + output_dir: str = "outputs/conversions", + recursive: bool = False, + pattern: str = "*.h5", + dry_run: bool = False, +) -> List[str]: + """ + Convert metadata structure in HDF5 file(s) per TOML mapping. + + Parameters: + - toml_path: Path to TOML mapping file containing HKL hierarchy and PV/constant mappings. + - hdf5_path: Path to a single HDF5 file or a directory containing HDF5 files. + - base_group: Base group under which metadata resides (default 'entry/data/metadata'). + - include: When False, only build the HKL group hierarchy and NAME labels; do not copy datasets or constants. + When True, also copy datasets from PVs and write constants according to mapping. + - in_place: If True, modify files in place. If False, write converted copies to output_dir + as '_meta_update.h5'. Originals remain untouched. + - output_dir: Directory to write converted copies when in_place=False (default 'outputs/conversions'). + - recursive: When hdf5_path is a directory, recurse into subdirectories to find files. + - pattern: Glob pattern for selecting files within a directory (default '*.h5'). + - dry_run: If True, do not perform writes; return the list of planned output file paths. + + Returns: + - List of output file paths (planned paths in dry_run mode). + + Notes: + - NAME labels are derived using 'METADATA.CA' section of the TOML mapping, matching motor IDs. + - UB_MATRIX_VALUE is truncated to the first 9 elements if present when include=True. 
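+
+    Example (paths are illustrative):
+        # Plan a recursive conversion without touching any files
+        planned = convert_files_or_dir('pv_configs/metadata_pvs.toml', 'outputs/',
+                                       recursive=True, dry_run=True)
+        # Convert one file in place, copying PV datasets and constants as well
+        convert_files_or_dir('pv_configs/metadata_pvs.toml', 'scan_0001.h5',
+                             include=True, in_place=True)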
+ """ + src = Path(hdf5_path) + toml_p = Path(toml_path) + out_dir = Path(output_dir) + + files: List[Path] = [] + if src.is_file(): + files = [src] + elif src.is_dir(): + if recursive: + files = [p for p in src.rglob(pattern)] + else: + files = [p for p in src.glob(pattern)] + else: + raise FileNotFoundError(f"Path not found: {hdf5_path}") + + outputs: List[str] = [] + for f in files: + try: + out = _convert_single_file(f, toml_p, base_group, include, in_place, out_dir, dry_run) + outputs.append(out) + except Exception: + logger.exception(f"Failed to convert: {f}") + return outputs diff --git a/utils/pva_reader.py b/utils/pva_reader.py new file mode 100644 index 0000000..0b51366 --- /dev/null +++ b/utils/pva_reader.py @@ -0,0 +1,635 @@ +import toml +import blosc2 +import lz4.block +import bitshuffle +import numpy as np +import pvaccess as pva +from collections import deque +from epics import camonitor, caget +from PyQt5.QtCore import QObject, pyqtSignal + +class PVAReader(QObject): + # Signals + # signal_image_updated = pyqtSignal(np.ndarray) + # signal_attributes_updated = pyqtSignal(dict) + # signal_roi_updated = pyqtSignal(dict) + # signal_rsm_updated = pyqtSignal(dict) + # signal_analysis_updated = pyqtSignal(dict) + reader_scan_complete = pyqtSignal() + scan_state_changed = pyqtSignal(bool) + + def __init__(self, + input_channel='s6lambda1:Pva1:Image', + provider=pva.PVA, + config_filepath: str = 'pv_configs/metadata_pvs.toml', + viewer_type:str='image'): + """ + Initializes the PVA Reader for monitoring connections and handling image data. + + Args: + input_channel (str): Input channel for the PVA connection. + provider (protocol): The protocol for the PVA channel. + config_filepath (str): File path to the configuration TOML file. + """ + super(PVAReader, self).__init__() + # Each PVA ScalarType is enumerated in C++ starting 1-10 + # This means we map them as numbers to a numpy datatype which we parse from pva codec parameters + # Then use this to correctly decompress the image depending on the codec used + self.NUMPY_DATA_TYPE_MAP = { + pva.UBYTE : np.dtype('uint8'), + pva.BYTE : np.dtype('int8'), + pva.USHORT : np.dtype('uint16'), + pva.SHORT : np.dtype('int16'), + pva.UINT : np.dtype('uint32'), + pva.INT : np.dtype('int32'), + pva.ULONG : np.dtype('uint64'), + pva.LONG : np.dtype('int64'), + pva.FLOAT : np.dtype('float32'), + pva.DOUBLE : np.dtype('float64') + } + + # This also means we can parse the pva codec parameters to show the correct datatype in viewer + # rather than using default compressed dtype + self.NTNDA_DATA_TYPE_MAP = { + pva.UBYTE : 'ubyteValue', + pva.BYTE : 'byteValue', + pva.USHORT : 'ushortValue', + pva.SHORT : 'shortValue', + pva.UINT : 'uintValue', + pva.INT : 'intValue', + pva.ULONG : 'ulongValue', + pva.LONG : 'longValue', + pva.FLOAT : 'floatValue', + pva.DOUBLE : 'doubleValue', + } + + self.NTNDA_NUMPY_MAP = { + 'ubyteValue' : np.dtype('uint8'), + 'byteValue' : np.dtype('int8'), + 'ushortValue' : np.dtype('uint16'), + 'shortValue' : np.dtype('int16'), + 'uintValue' : np.dtype('uint32'), + 'intValue' : np.dtype('int32'), + 'ulongValue' : np.dtype('uint64'), + 'longValue' : np.dtype('int64'), + 'floatValue' : np.dtype('float32'), + 'doubleValue' : np.dtype('float64') + } + + self.VIEWER_TYPE_MAP = { + 'image': 'i', + 'analysis': 'a', + 'rsm': 'r' + } + + # variables related to monitoring connection + self.input_channel = input_channel + self.provider = provider + self.channel = pva.Channel(self.input_channel, self.provider) + self.pva_prefix = 
input_channel.split(":")[0]
+
+        # variables setup using config
+        self.config = {}
+        self.rois = {}
+        self._roi_names = []
+        self.stats = {}
+        self.CONSUMER_MODE = ''
+        self.OUTPUT_FILE_LOCATION = ''
+        self.ROI_IN_CONFIG = False
+        self.ANALYSIS_IN_CONFIG = False
+        self.HKL_IN_CONFIG = False
+        self.CACHE_OPTIONS = {}
+        self.CACHING_MODE = ''
+        self.MAX_CACHE_SIZE = 0
+        self.is_caching = False
+        self.is_scan_complete = False
+
+        # variables that will store pva data
+        self.pva_object = None
+        self.image = None
+        self.shape = (0,0)
+        self.timestamp = None
+        self.data_type = None
+        self.display_dtype = None
+        self.numpy_dtype = None
+        self.attributes = []
+        self.pv_attributes = {}
+
+        # variables used for image manipulation
+        self.pixel_ordering = 'F'
+        self.viewer_type = self.VIEWER_TYPE_MAP.get(viewer_type, 'i')
+        self.image_is_transposed = False
+
+        # variables used for parsing specific attribute data from pv
+        self.analysis_index = None
+        self.analysis_attributes = {}
+        self.rsm_attributes = {}
+
+        # variables used for frame count
+        self.last_array_id = None
+        self.frames_missed = 0
+        self.frames_received = 0
+        self.id_diff = 0
+
+        # variables for data caches
+        self.caches_needed = False
+        self.caches_initialized = False
+        self.cached_attributes = None
+        self.cached_images = None
+        self.cached_qx = None
+        self.cached_qy = None
+        self.cached_qz = None
+        # self._on_scan_complete_callbacks = []
+
+        self._configure(config_filepath)
+
+############################# Configuration #############################
+    def _configure(self, config_path: str) -> None:
+        if config_path != '':
+            with open(config_path, 'r') as toml_file:
+                # loads toml config into a python dict
+                self.config: dict = toml.load(toml_file)
+
+            #TODO: make it so that file location can be parsed as a pv with a function
+            # using something like caget or parse the pv attributes
+            self.OUTPUT_FILE_LOCATION = self.config.get('OUTPUT_FILE_LOCATION', 'OUTPUT.h5')
+            self.stats: dict = self.config.get('STATS', {})
+            self.ROI_IN_CONFIG = ('ROI' in self.config)
+            self.ANALYSIS_IN_CONFIG = ('ANALYSIS' in self.config)
+            self.HKL_IN_CONFIG = ('HKL' in self.config)
+
+            if self.config.get('DETECTOR_PREFIX', ''):
+                self.pva_prefix = self.config['DETECTOR_PREFIX']
+
+            if self.ROI_IN_CONFIG:
+                for roi in self.config['ROI']:
+                    self._roi_names.append(roi)
+
+            # Configuring Cache settings
+            self.CACHE_OPTIONS: dict = self.config.get('CACHE_OPTIONS', {})
+            self.set_cache_options()
+            if self.caches_needed != self.caches_initialized:
+                self.init_caches()
+
+            # Configuring Analysis Caches
+            if self.ANALYSIS_IN_CONFIG:
+                self.CONSUMER_MODE = self.config.get('CONSUMER_MODE', '')
+                if self.CONSUMER_MODE == "continuous":
+                    self.analysis_cache_dict = {"Position": set(),
+                                                "Intensity": {},
+                                                "ComX": {},
+                                                "ComY": {}}
+
+    def set_cache_options(self) -> None:
+        self.CACHING_MODE = self.CACHE_OPTIONS.get('CACHING_MODE', '')
+        if self.CACHING_MODE != '':
+            self.caches_needed = True
+            # Read each option with an explicit default; chaining setdefault() with
+            # partial dicts returned None for keys missing from an existing section
+            if self.CACHING_MODE == 'alignment':
+                self.MAX_CACHE_SIZE = self.CACHE_OPTIONS.setdefault('ALIGNMENT', {}).get('MAX_CACHE_SIZE', 100)
+            elif self.CACHING_MODE == 'scan':
+                scan_opts = self.CACHE_OPTIONS.setdefault('SCAN', {})
+                self.FLAG_PV = scan_opts.get('FLAG_PV', '')
+                self.START_SCAN = scan_opts.get('START_SCAN', True)
+                self.STOP_SCAN = scan_opts.get('STOP_SCAN', False)
+                self.MAX_CACHE_SIZE = scan_opts.get('MAX_CACHE_SIZE', 100)
+            elif self.CACHING_MODE == 'bin':
+                bin_opts = self.CACHE_OPTIONS.setdefault('BIN', {})
+                self.BIN_COUNT = bin_opts.get('COUNT', 10)
+                self.BIN_SIZE = bin_opts.get('SIZE', 16)
+
+    def init_caches(self) -> None:
+        if self.CACHING_MODE == 'alignment' or self.CACHING_MODE == 'scan':
+            self.cached_images = deque(maxlen=self.MAX_CACHE_SIZE)
+            self.cached_attributes = deque(maxlen=self.MAX_CACHE_SIZE)
+            if self.HKL_IN_CONFIG:
+                self.cached_qx = deque(maxlen=self.MAX_CACHE_SIZE)
+                self.cached_qy = deque(maxlen=self.MAX_CACHE_SIZE)
+                self.cached_qz = deque(maxlen=self.MAX_CACHE_SIZE)
+        elif self.CACHING_MODE == 'bin':
+            # TODO: when creating the h5 file, have one entry called data that is the average of each bin
+            # and then an entry for each bin that lines up with the attributes and rsm attributes
+            self.cached_images = [deque(maxlen=self.BIN_SIZE) for _ in range(self.BIN_COUNT)]
+            self.cached_attributes = [deque(maxlen=self.BIN_SIZE) for _ in range(self.BIN_COUNT)]
+            if self.HKL_IN_CONFIG:
+                self.cached_qx = [deque(maxlen=self.BIN_SIZE) for _ in range(self.BIN_COUNT)]
+                self.cached_qy = [deque(maxlen=self.BIN_SIZE) for _ in range(self.BIN_COUNT)]
+                self.cached_qz = [deque(maxlen=self.BIN_SIZE) for _ in range(self.BIN_COUNT)]
+        self.caches_initialized = True
+
+#################### Class and PVA Channel Callbacks ########################
+    # def add_on_scan_complete_callback(self, callback_func):
+    #     if callable(callback_func):
+    #         self._on_scan_complete_callbacks.append(callback_func)
+    def pva_callbackSuccess(self, pv) -> None:
+        """
+        Callback for handling monitored PVA changes.
+
+        Args:
+            pv (PvObject): The PVA object received by the channel monitor.
+        """
+        try:
+            self.frames_received += 1
+            self.pva_object = pv
+
+            # parse data required to manipulate pv image
+            self.parse_image_data_type(pv)
+            self.shape = self.parse_img_shape(pv)
+            self.image = self.pva_to_image(pv)
+
+            # update with latest pv metadata
+            self.pv_attributes = self.parse_attributes(pv)
+
+            # Check for any roi pvs in metadata
+            if self.ROI_IN_CONFIG:
+                self.parse_roi_pvs(self.pv_attributes)
+
+            # Check for rsm attributes in metadata
+            if self.HKL_IN_CONFIG and 'RSM' in self.pv_attributes:
+                self.parse_rsm_attributes(self.pv_attributes)
+
+            if self.ANALYSIS_IN_CONFIG and 'Analysis' in self.pv_attributes:
+                self.parse_analysis_attributes(self.pv_attributes)
+
+            if self.caches_initialized:
+                self.cache_attributes(self.pv_attributes, self.rsm_attributes)
+                self.cache_image(np.ravel(self.image))
+
+            # TODO: deprecated - change the parsing to be closer to parsing RSM attributes with the new parse_attributes function
+            if self.ANALYSIS_IN_CONFIG:
+                self.analysis_index = self.locate_analysis_index()
+                # Only runs if an analysis index was found
+                if self.analysis_index is not None:
+                    self.analysis_attributes = self.attributes[self.analysis_index]
+                    if self.CONSUMER_MODE == "continuous":
+                        # turns axis1 and axis2 into a tuple
+                        incoming_coord = (self.analysis_attributes["value"][0]["value"].get("Axis1", 0.0),
+                                          self.analysis_attributes["value"][0]["value"].get("Axis2", 0.0))
+                        # use a tuple as a key so that we can check if there is a repeat position
+                        self.analysis_cache_dict["Intensity"].update({incoming_coord: self.analysis_cache_dict["Intensity"].get(incoming_coord, 0) + self.analysis_attributes["value"][0]["value"].get("Intensity", 0.0)})
+                        self.analysis_cache_dict["ComX"].update({incoming_coord: self.analysis_cache_dict["ComX"].get(incoming_coord, 0) + self.analysis_attributes["value"][0]["value"].get("ComX", 0.0)})
+                        self.analysis_cache_dict["ComY"].update({incoming_coord: self.analysis_cache_dict["ComY"].get(incoming_coord, 0) + self.analysis_attributes["value"][0]["value"].get("ComY", 0.0)})
+                        # double storing of the position, will find out if needed
+                        # ("Position" is a set, so use add() rather than item assignment)
+                        self.analysis_cache_dict["Position"].add(incoming_coord)
+
+            if self.is_scan_complete and not self.is_caching:
+                self.is_scan_complete = False
+                print('Scan Complete')
+                # TODO: add a check before emitting the signal to make sure the caches are all same length
+                # make into a function
+                self.reader_scan_complete.emit()
+
+        except Exception as e:
+            print(f'[PVA Reader] Failed to execute callback: {e}')
+            self.frames_received -= 1
+            self.frames_missed += 1
+
+    def roi_backup_callback(self, pvname, value, **kwargs) -> None:
+        name_components = pvname.split(":")
+        roi_key = name_components[1]
+        pv_key = name_components[2]
+        pv_value = value
+        # Can't chain two key lookups (self.rois[roi_key][pv_key]) on a missing inner dict,
+        # so .setdefault() creates the inner dictionary before update() adds the key.
+        self.rois.setdefault(roi_key, {}).update({pv_key: pv_value})
+
+########################### PVA PARSING ##################################
+    def locate_analysis_index(self) -> int|None:
+        """
+        Locates the index of the analysis attribute in the PVA attributes.
+
+        Returns:
+            int: The index of the analysis attribute or None if not found.
+        """
+        if self.pv_attributes:
+            for i, attr_name in enumerate(self.pv_attributes.keys()):
+                if attr_name == "Analysis":
+                    return i
+            else:
+                return None
+
+    def parse_image_data_type(self, pva_object) -> None:
+        """
+        Parses the PVA Object to determine the incoming data type.
+        """
+        if pva_object is not None:
+            try:
+                self.data_type = list(pva_object['value'][0].keys())[0]
+                self.display_dtype = self.data_type if pva_object['codec']['name'] == '' else self.NTNDA_DATA_TYPE_MAP.get(pva_object['codec']['parameters'][0]['value'])
+                self.numpy_dtype = self.NTNDA_NUMPY_MAP.get(self.display_dtype, None)
+            except Exception:
+                self.display_dtype = "could not detect"
+
+    def parse_img_shape(self, pva_object) -> tuple:
+        if 'dimension' in pva_object:
+            return tuple([dim['size'] for dim in pva_object['dimension']])
+        # Fall back to the empty-frame default used at init when no dimension info is present
+        return (0, 0)
+
+    def parse_attributes(self, pva_object) -> dict:
+        pv_attributes = {}
+        if pva_object is not None and 'attribute' in pva_object:
+            pv_attributes['timeStamp-secondsPastEpoch'] = pva_object['timeStamp']['secondsPastEpoch']
+            pv_attributes['timeStamp-nanoseconds'] = pva_object['timeStamp']['nanoseconds']
+            attributes = pva_object['attribute']
+            for attr in attributes:
+                name = attr['name']
+                value = attr['value'][0].get('value', None)
+                if value is not None:
+                    pv_attributes[name] = value
+            return pv_attributes
+        else:
+            return {}
+
+    def parse_analysis_attributes(self, pv_attributes: dict) -> None:
+        pass
+        # analysis_attributes: dict = pv_attributes['Analysis']
+        # axis_pos = (analysis_attributes['Axis1'], analysis_attributes['Axis2'])
+        # intensity = analysis_attributes['Intensity']
+
+    def parse_rsm_attributes(self, pv_attributes: dict) -> None:
+        rsm_attributes: dict = pv_attributes['RSM']
+        codec = rsm_attributes['codec'].get('name', '')
+        if codec != '':
+            # Codec parameters carry the original scalar type (same layout as the image codec)
+            dtype = self.NUMPY_DATA_TYPE_MAP.get(rsm_attributes['codec']['parameters'][0]['value'])
+            self.rsm_attributes = {'qx': self.decompress_array(compressed_array=rsm_attributes['qx']['value'],
+                                                               codec=codec,
+                                                               uncompressed_size=rsm_attributes['qx']['uncompressedSize'],
+                                                               dtype=dtype),
+                                   'qy': self.decompress_array(compressed_array=rsm_attributes['qy']['value'],
+                                                               codec=codec,
+                                                               uncompressed_size=rsm_attributes['qy']['uncompressedSize'],
+                                                               dtype=dtype),
+                                   'qz': self.decompress_array(compressed_array=rsm_attributes['qz']['value'],
+                                                               codec=codec,
+                                                               uncompressed_size=rsm_attributes['qz']['uncompressedSize'],
+                                                               dtype=dtype)}
+        else:
+            self.rsm_attributes = {'qx': rsm_attributes['qx']['value'],
+                                   'qy': rsm_attributes['qy']['value'],
+                                   'qz': rsm_attributes['qz']['value']}
+    def parse_roi_pvs(self, pv_attributes: dict) -> None:
+        """
+        Parses attributes to extract ROI-specific PV information.
+        """
+        for roi in self._roi_names:
+            for dimension in ['MinX', 'MinY', 'SizeX', 'SizeY']:
+                pv_key = f'{self.pva_prefix}:{roi}:{dimension}'
+                pv_value = pv_attributes.get(pv_key, None)
+                # An inner dictionary must exist before it can be updated, so
+                # .setdefault() initializes it on first use for each ROI.
+                if pv_value is not None:
+                    self.rois.setdefault(roi, {}).update({dimension: pv_value})
+
+    def pva_to_image(self, pva_object) -> np.ndarray:
+        """
+        Converts the PVA Object to an image array and determines if a frame was missed.
+        Handles bslz4 and lz4 compressed image data.
+
+        image is of type np.ndarray
+        """
+        try:
+            if 'dimension' in pva_object:
+                if pva_object['codec']['name'] != '':
+                    image: np.ndarray = self.decompress_array(compressed_array=pva_object['value'][0][self.data_type],
+                                                              codec=pva_object['codec']['name'],
+                                                              uncompressed_size=pva_object['uncompressedSize'],
+                                                              dtype=self.NUMPY_DATA_TYPE_MAP.get(pva_object['codec']['parameters'][0]['value']))
+                else:
+                    # Handle uncompressed data
+                    image: np.ndarray = pva_object['value'][0][self.data_type]
+
+                # Check for missed frame starts here
+                # TODO: can be its own function
+                current_array_id = pva_object['uniqueId']
+                if self.last_array_id is not None:
+                    self.id_diff = current_array_id - self.last_array_id - 1
+                    if (self.id_diff > 0):
+                        self.frames_missed += self.id_diff
+                self.last_array_id = current_array_id
+                self.id_diff = 0
+
+                return image.reshape(self.shape, order=self.pixel_ordering).T if self.image_is_transposed else image.reshape(self.shape, order=self.pixel_ordering)
+            else:
+                self.image = None
+                raise ValueError("[PV Parsing] Image data could not be processed.")
+
+        except Exception as e:
+            print(f"[PVA Reader] Failed to process image: {e}")
+
+    def decompress_array(self, compressed_array: np.ndarray, codec: str, uncompressed_size: int, dtype: np.dtype) -> np.ndarray:
+        # Handle LZ4 compressed data
+        if codec == 'lz4':
+            decompressed_bytes = lz4.block.decompress(compressed_array, uncompressed_size=uncompressed_size)
+            # Convert bytes to numpy array with the correct dtype
+            return np.frombuffer(decompressed_bytes, dtype=dtype)
+        # Handle BSLZ4 compressed data
+        elif codec == 'bslz4':
+            # uncompressed size has to be divided by the number of bytes needed to store the desired output dtype
+            uncompressed_shape = (uncompressed_size // dtype.itemsize,)
+            # Decompress numpy array to correct datatype
+            return bitshuffle.decompress_lz4(compressed_array, uncompressed_shape, dtype)
+        # Handle BLOSC compressed data
+        elif codec == 'blosc':
+            decompressed_bytes = blosc2.decompress(compressed_array)
+            return np.frombuffer(decompressed_bytes, dtype=dtype)
+        else:
+            # Fail loudly instead of silently returning None for an unknown codec
+            raise ValueError(f'[Decompress] Unsupported codec: {codec}')
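+
+    # Worked example for the codec handling above (illustrative values, not from a
+    # real stream): a bslz4-compressed uint16 frame of 1024x1024 pixels arrives with
+    # uncompressedSize = 1024 * 1024 * 2 bytes, so the shape handed to
+    # bitshuffle.decompress_lz4 is (uncompressedSize // dtype.itemsize,) = (1048576,),
+    # which reshape(self.shape, order='F') then restores to the 2D detector frame.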
+
+################################## Caching ####################################
+    def cache_attributes(self, pv_attributes=None, rsm_attributes=None, analysis_attributes=None) -> None:
+        if self.CACHING_MODE == 'alignment':
+            self.cached_attributes.append(pv_attributes)
+            if rsm_attributes:
+                self.cached_qx.append(rsm_attributes['qx'])
+                self.cached_qy.append(rsm_attributes['qy'])
+                self.cached_qz.append(rsm_attributes['qz'])
+            elif not rsm_attributes and self.viewer_type == self.VIEWER_TYPE_MAP['rsm']:
+                raise AttributeError('[Caching Attributes] Could not find \'RSM\' attribute')
+            return
+        elif self.CACHING_MODE == 'scan':
+            # TODO: create different scan functions for if a flag pv is bool/binary vs not
+            # currently only works with binary/boolean flag pvs
+            if self.FLAG_PV in pv_attributes:
+                flag_value = pv_attributes[self.FLAG_PV]
+                if flag_value == self.START_SCAN:
+                    if not self.is_caching:
+                        self.is_caching = True
+                        self.is_scan_complete = False
+                        self.scan_state_changed.emit(True)
+                elif (flag_value == self.STOP_SCAN) and self.is_caching:
+                    self.is_caching = False
+                    self.is_scan_complete = True
+                    self.scan_state_changed.emit(False)
+
+                if self.is_caching:
+                    self.cached_attributes.append(pv_attributes)
+                    if rsm_attributes:
+                        self.cached_qx.append(rsm_attributes['qx'])
+                        self.cached_qy.append(rsm_attributes['qy'])
+                        self.cached_qz.append(rsm_attributes['qz'])
+                    elif not rsm_attributes and self.viewer_type == self.VIEWER_TYPE_MAP['rsm']:
+                        raise AttributeError('[Caching Attributes] Could not find \'RSM\' attribute')
+            else:
+                raise AttributeError('[Caching Attributes] Flag_PV not found')
+        elif self.CACHING_MODE == 'bin':
+            # Bin by absolute frame index (received + missed) so bins stay
+            # aligned even when frames are dropped
+            bin_index = (self.frames_received + self.frames_missed - 1) % self.BIN_COUNT
+            self.cached_attributes[bin_index].append(pv_attributes)
+
+    def cache_image(self, image) -> None:
+        if self.CACHING_MODE == 'alignment':
+            self.cached_images.append(image)
+            return
+        elif self.CACHING_MODE == 'scan':
+            if self.is_caching:
+                self.cached_images.append(image)
+            return
+        elif self.CACHING_MODE == 'bin':
+            if self.viewer_type == 'i':
+                bin_index = (self.frames_received + self.frames_missed - 1) % self.BIN_COUNT
+                self.cached_images[bin_index].append(image)
+            return
+
+    def reset_caches(self) -> None:
+        self.cached_images.clear()
+        self.cached_attributes.clear()
+        self.cached_qx.clear()
+        self.cached_qy.clear()
+        self.cached_qz.clear()
+
+########################### Start and Stop Channel Monitors ##########################
+    def start_channel_monitor(self, callback=None) -> None:
+        """
+        Subscribes to the PVA channel with a callback function and starts monitoring for PV changes.
+
+        Args:
+            callback (function, optional): A custom callback to use for the monitor.
+                If None, defaults to self.pva_callbackSuccess.
+        """
+        monitor_callback = callback if callback is not None else self.pva_callbackSuccess
+        self.channel.subscribe('pva_monitor', monitor_callback)
+        self.channel.startMonitor()
+
+    def stop_channel_monitor(self) -> None:
+        """
+        Stops all monitoring and callback functions.
+        """
+        self.channel.unsubscribe('pva_monitor')
+        self.channel.stopMonitor()
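+
+    # Usage sketch (illustrative; `reader` is assumed to be a configured
+    # instance of this class). A custom callback receives every PVA update:
+    #
+    #   def on_update(pva_object):
+    #       print(pva_object['uniqueId'])
+    #
+    #   reader.start_channel_monitor(callback=on_update)
+    #   ...
+    #   reader.stop_channel_monitor()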
+ """ + self.channel.unsubscribe('pva_monitor') + self.channel.stopMonitor() + + def start_roi_backup_monitor(self) -> None: + try: + for roi_num, roi_dict in self.config['ROI'].items(): + for config_key, pv_name in roi_dict.items(): + name_components = pv_name.split(":") + + roi_key = name_components[1] # ROI1-ROI4 + pv_key = name_components[2] # MinX, MinY, SizeX, SizeY + + self.rois.setdefault(roi_key, {}).update({pv_key: caget(pv_name)}) + camonitor(pvname=pv_name, callback=self.roi_backup_callback) + except Exception as e: + print(f'[PVA Reader] Failed to setup backup ROI monitor: {e}') + + ################################# Getters ################################# + def get_cached_images(self) -> list[np.ndarray]: + return list(self.cached_images) + + def get_cached_attributes(self) -> list[dict]: + return list(self.cached_attributes) + + def get_cached_rsm(self) -> tuple[list[np.ndarray], list[np.ndarray], list[np.ndarray]]: + if len(self.cached_qx) == len(self.cached_qy) == len(self.cached_qz): + return list(self.cached_qx), list(self.cached_qy), list(self.cached_qz) + else: + raise ValueError("[PVA Reader] Cached qx, qy, and qz must have the same length.") + + def get_all_caches(self, clear_caches: bool=False) -> dict: + """ + Returns all cached data. + + Args: + clear_caches (bool): Whether to clear the caches after returning the data. + """ + images = self.get_cached_images() + attributes = self.get_cached_attributes() + + # Only get RSM data if HKL is configured and viewer type supports it + if self.HKL_IN_CONFIG and self.viewer_type != 'i': + rsm = self.get_cached_rsm() + # Check lengths including RSM data + if len(images) == len(attributes) == len(rsm[0]) == len(rsm[1]) == len(rsm[2]): + data = { + 'images': images, + 'attributes': attributes, + 'rsm': rsm + } + else: + raise ValueError("[PVA Reader] Cached data must have the same length.") + else: + # For image viewer type or when no HKL config, only check images and attributes + rsm = ([], [], []) # Empty RSM data + if len(images) == len(attributes): + data = { + 'images': images, + 'attributes': attributes, + 'rsm': rsm + } + else: + raise ValueError("[PVA Reader] Cached data must have the same length.") + + + if clear_caches: + self.reset_caches() + + return data + + def get_output_file_location(self) -> dict: + if self.caches_initialized and self.cached_attributes: + latest_attribute: dict = self.cached_attributes[-1] + else: + latest_attribute: dict = self.pv_attributes + + file_path_pv = latest_attribute.get('FilePath:Value', '') + file_name_pv = latest_attribute.get('FileName:Value', '') + + if file_path_pv != ' ' and file_name_pv != ' ': + return {'FilePath': file_path_pv, + 'FileName': file_name_pv} + else: + return {'FilePath': str(self.OUTPUT_FILE_LOCATION).strip()} + + + def get_config_settings(self) -> dict: + config_settings = {'OUTPUT_FILE_CONFIG' : self.get_output_file_location(), + 'ROI_IN_CONFIG' : self.ROI_IN_CONFIG, + 'ANALYSIS_IN_CONFIG' : self.ANALYSIS_IN_CONFIG, + 'HKL_IN_CONFIG' : self.HKL_IN_CONFIG, + 'CACHE_OPTIONS' : self.CACHE_OPTIONS, + 'caches_initialized' : self.caches_initialized} + + return config_settings + + def get_frames_missed(self) -> int: + """ + Returns the number of frames missed. + + Returns: + int: The number of missed frames. + """ + return self.frames_missed + + def get_latest_image(self) -> np.ndarray: + """ + Returns the current PVA image. + + Returns: + numpy.ndarray: The current image array. 
+ """ + return self.image + + def get_latest_attributes(self) -> list[dict]: + """ + Returns the attributes of the current PVA object. + + Returns: + list: The attributes of the current PVA object. + """ + return self.attributes + + def get_shape(self) -> tuple[int]: + return self.shape + + diff --git a/utils/rsm_converter.py b/utils/rsm_converter.py new file mode 100644 index 0000000..f78e595 --- /dev/null +++ b/utils/rsm_converter.py @@ -0,0 +1,215 @@ +import toml +import numpy as np +import h5py +import xrayutilities as xu +from typing import Dict, Optional + +"""Utilities for converting detector frames into reciprocal space (RSM). +This module provides a concise RSMConverter focused on the essential +pipeline: reading metadata, building geometry, and computing Q-space. +""" + +class Data: + """Simple container for 3D points and intensities.""" + def __init__(self, points: np.ndarray, intensities: np.ndarray, metadata: dict = None, num_images: int = 0, shape: tuple = None): + self.points = points + self.intensities = intensities + self.metadata = metadata + self.num_images = num_images + self.shape = shape + +class RSMConverter: + """Compute reciprocal space mapping (RSM) from HDF5 detector data. + + Responsibilities: + - Read HKL metadata and detector setup from file + - Build xrayutilities geometry and convert Angles → Q-space + - Provide a compact public API for loading and computing + """ + def __init__(self, config_path: str = "pv_configs/s6lambda.toml"): + """Initialize with a TOML HKL config path (used by other parts of the app).""" + self.hkl_config: Dict[str, Dict[str, str]] = {} + config = toml.load(config_path) + if "HKL" in config: + self.hkl_config = config["HKL"] + + # Public API + def load_data(self, file_path: Optional[str] = None) -> Data: + """Load points and intensities from an HDF5 file and return Data. + If precomputed qx, qy, qz are absent, compute Q-space via RSM. 
+ """ + path = file_path or getattr(self, "file_path", None) + if not path: + raise FileNotFoundError("No file path provided to load_data and none set in DashAnalysis.") + points_3d, intensities, num_images, shape = self.load_h5_to_3d(path) + return Data(points=points_3d, intensities=intensities, metadata=None, num_images=num_images, shape=shape) + + def load_h5_to_3d(self, filename: str): + """Load q-points and intensities; compute Q if not present in file.""" + with h5py.File(filename, "r") as f: + shape = f["entry/data/data"].shape + qx = self.take_data_by_key(filename, "qx") + if qx is not None: + qy = self.take_data_by_key(filename, "qy") + qz = self.take_data_by_key(filename, "qz") + q = np.column_stack((np.ravel(qx), np.ravel(qy), np.ravel(qz))) + else: + q = self.get_q_points(filename) + intensity = self.get_intensity(filename) + return q, intensity, shape[0], shape[1:] + + def create_rsm(self, filename: str, frame: int): + """Create reciprocal space mapping for a single frame using xrayutilities.""" + try: + with h5py.File(filename, "r") as f: + shape = f["entry/data/data"].shape + sc_dir, sc_pos, dc_dir, dc_pos = self.get_sample_and_detector_circles(f, frame) + primary, inplane, surface, ub, energy = self.get_physics_params(f) + qconv = xu.experiment.QConversion(sc_dir, dc_dir, primary) + hxrd = xu.HXRD(inplane, surface, en=energy, qconv=qconv) + p_dir1, p_dir2, cch1, cch2, nch1, nch2, pw1, pw2, dist, roi = self.get_detector_setup(f, shape) + hxrd.Ang2Q.init_area( + p_dir1, p_dir2, + cch1=cch1, cch2=cch2, + Nch1=nch1, Nch2=nch2, + pwidth1=pw1, pwidth2=pw2, + distance=dist, + roi=roi, + ) + angles = [*sc_pos, *dc_pos] + return hxrd.Ang2Q.area(*angles, UB=ub) + except Exception: + raise + + def get_q_points(self, filename: str) -> np.ndarray: + """Compute Q points for all frames and return flattened (N, 3) array.""" + with h5py.File(filename, "r") as f: + n_frames = f["entry/data/data"].shape[0] + qxyz_stack = np.stack([self.create_rsm(filename, i) for i in range(n_frames)], axis=0) + return np.column_stack(( + qxyz_stack[:, 0, ...].ravel(), + qxyz_stack[:, 1, ...].ravel(), + qxyz_stack[:, 2, ...].ravel(), + )) + + # Physics & Metadata + def get_physics_params(self, h5_file: h5py.File): + """Extract beam directions, UB matrix, and energy from HKL metadata.""" + meta = h5_file["entry/data/metadata/HKL"] + primary = [meta[f"PRIMARY_BEAM_DIRECTION/AXIS_NUMBER_{i}"][0] for i in range(1, 4)] + inplane = [meta[f"INPLANE_REFERENCE_DIRECITON/AXIS_NUMBER_{i}"][0] for i in range(1, 4)] + surface = [meta[f"SAMPLE_SURFACE_NORMAL_DIRECITON/AXIS_NUMBER_{i}"][0] for i in range(1, 4)] + ub = self.get_ub_matrix_from_file(h5_file) + energy = float(meta["SPEC/ENERGY_VALUE"][0]) * 1000.0 + return primary, inplane, surface, ub, energy + + def get_intensity(self, filename: str) -> np.ndarray: + """Return detector intensities as a flattened array.""" + with h5py.File(filename, "r") as f: + return f["entry/data/data"][:].ravel() + + # Detector parameters + def get_detector_setup(self, h5_file: h5py.File, shape: tuple): + """Return detector setup: directions, center pixels, size, pixel widths, distance, roi.""" + det = h5_file["entry/data/metadata/HKL/DETECTOR_SETUP"] + roi = [0, shape[1], 0, shape[2]] + p_dir1 = self._first_str(det["PIXEL_DIRECTION_1"]) + p_dir2 = self._first_str(det["PIXEL_DIRECTION_2"]) + cch1 = int(det["CENTER_CHANNEL_PIXEL"][0]) + cch2 = int(det["CENTER_CHANNEL_PIXEL"][1]) + size = det["SIZE"][...] 
+
+    # Detector parameters
+    def get_detector_setup(self, h5_file: h5py.File, shape: tuple):
+        """Return detector setup: directions, center pixels, size, pixel widths, distance, roi."""
+        det = h5_file["entry/data/metadata/HKL/DETECTOR_SETUP"]
+        roi = [0, shape[1], 0, shape[2]]
+        p_dir1 = self._first_str(det["PIXEL_DIRECTION_1"])
+        p_dir2 = self._first_str(det["PIXEL_DIRECTION_2"])
+        cch1 = int(det["CENTER_CHANNEL_PIXEL"][0])
+        cch2 = int(det["CENTER_CHANNEL_PIXEL"][1])
+        size = det["SIZE"][...]
+        pw1 = float(size[0]) / float(shape[1])
+        pw2 = float(size[1]) / float(shape[2])
+        dist = float(det["DISTANCE"][0])
+        return p_dir1, p_dir2, cch1, cch2, shape[1], shape[2], pw1, pw2, dist, roi
+
+    # Geometry extraction
+    def get_sample_and_detector_circles(self, h5_file: h5py.File, frame: int):
+        """Return lists of direction strings and positions for sample and detector circles."""
+        sc_dir, sc_pos, dc_dir, dc_pos = [], [], [], []
+        hkl_base = "entry/data/metadata/HKL"
+        sample_priority = ["MU", "ETA", "CHI", "PHI"]
+        detector_priority = ["NU", "DELTA"]
+
+        # Use explicit SAMPLE_CIRCLE_AXIS_1..4 entries when present;
+        # otherwise fall back to the canonical motor names
+        fallback_found = False
+        for i in range(1, 5):
+            path = f"{hkl_base}/SAMPLE_CIRCLE_AXIS_{i}"
+            if path in h5_file:
+                fallback_found = True
+                dir_val = self._first_str(h5_file[f"{path}/DIRECTION_AXIS"])
+                sc_dir.append(dir_val)
+                sc_pos.append(float(h5_file[f"{path}/POSITION"][frame]))
+        if not fallback_found:
+            for axis in sample_priority:
+                path = f"{hkl_base}/{axis}"
+                if path in h5_file:
+                    dir_val = self._first_str(h5_file[f"{path}/DIRECTION_AXIS"])
+                    sc_dir.append(dir_val)
+                    sc_pos.append(float(h5_file[f"{path}/POSITION"][frame]))
+
+        # Use explicit DETECTOR_CIRCLE_AXIS_1..2 entries when present;
+        # otherwise fall back to the canonical motor names
+        fallback_d_found = False
+        for i in range(1, 3):
+            path = f"{hkl_base}/DETECTOR_CIRCLE_AXIS_{i}"
+            if path in h5_file:
+                fallback_d_found = True
+                dir_val = self._first_str(h5_file[f"{path}/DIRECTION_AXIS"])
+                dc_dir.append(dir_val)
+                dc_pos.append(float(h5_file[f"{path}/POSITION"][frame]))
+        if not fallback_d_found:
+            for axis in detector_priority:
+                path = f"{hkl_base}/{axis}"
+                if path in h5_file:
+                    dir_val = self._first_str(h5_file[f"{path}/DIRECTION_AXIS"])
+                    dc_dir.append(dir_val)
+                    dc_pos.append(float(h5_file[f"{path}/POSITION"][frame]))
+
+        return list(sc_dir), list(sc_pos), list(dc_dir), list(dc_pos)
+
+    # UB helpers
+    def get_ub_matrix_from_file(self, h5_file: h5py.File) -> np.ndarray:
+        """Return the 3x3 UB matrix built from the first nine values at the file-based path."""
+        path = "entry/data/metadata/HKL/SPEC/UB_MATRIX_VALUE"
+        if path in h5_file:
+            return self._ub_from_values(h5_file[path][...])
+        raise KeyError(f"UB Matrix link missing at {path}")
+
+    # HDF5 utilities
+    def take_data_by_key(self, file_path, target_key):
+        """Return the dataset whose path ends with target_key, or None if not found."""
+        with h5py.File(file_path, "r") as f:
+            found_path = None
+            def find_key(name, obj):
+                nonlocal found_path
+                if name.endswith(target_key):
+                    found_path = name
+                    # Returning a non-None value stops visititems() early
+                    return True
+            f.visititems(find_key)
+            if found_path:
+                ds = f[found_path]
+                return ds.asstr()[:] if ds.dtype == "O" else ds[:]
+            else:
+                print(f"Key '{target_key}' not found in file.")
+                return None
+
+    # Internal helpers
+    def _first_str(self, ds) -> str:
+        """Return the first element of a dataset as a string, decoding bytes when needed."""
+        try:
+            return ds.asstr()[0]
+        except Exception:
+            val = ds[0]
+            if isinstance(val, (bytes, np.bytes_)):
+                try:
+                    return val.decode("utf-8")
+                except Exception:
+                    return str(val)
+            return str(val)
+
+    def _ub_from_values(self, ub_values) -> np.ndarray:
+        """Take the first 9 elements and reshape them to a 3×3 UB matrix."""
+        arr = np.asarray(ub_values).ravel()
+        if arr.size < 9:
+            raise ValueError(f"UB matrix requires at least 9 elements, got {arr.size}")
+        return arr[:9].reshape(3, 3)
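+
+if __name__ == "__main__":
+    # Minimal usage sketch (illustrative only): "scan_0001.h5" is a
+    # hypothetical path, and the default TOML config is assumed to exist.
+    converter = RSMConverter()
+    data = converter.load_data("scan_0001.h5")
+    print(f"{data.num_images} frames, {data.points.shape[0]} q-points")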
diff --git a/utils/size_manager.py b/utils/size_manager.py
new file mode 100644
index 0000000..10b4c1b
--- /dev/null
+++ b/utils/size_manager.py
@@ -0,0 +1,199 @@
+from PyQt5.QtCore import QObject, QEvent
+from PyQt5.QtWidgets import QWidget, QApplication, QScrollArea
+
+class SizeManager(QObject):
+    """
+    Manages how QWidgets, QFonts, and other QObjects automatically
+    scale when their window is resized.
+
+    Usage:
+        app = QApplication([])
+        size_manager = SizeManager(app)
+    That's it: automatic scaling is now active.
+    """
+
+    def __init__(self, app: QApplication) -> None:
+        """
+        Initialize the size manager with a QApplication.
+
+        Args:
+            app (QApplication): The application to manage for window resize events
+        """
+        super().__init__()
+
+        # Application reference
+        self.app = app
+
+        # Storage for the widgets' original values
+        self.original_font_size = {}
+        self.base_window_widths = {}
+
+        # Install event monitoring
+        self.app.installEventFilter(self)
+
+    def eventFilter(self, obj: QObject, event: QEvent) -> bool:
+        """
+        Monitors window objects' resize events to trigger scaling.
+
+        Args:
+            obj (QObject): The object being monitored for events
+            event (QEvent): The event that occurred (checking for resize)
+        """
+        if event.type() == QEvent.Resize and isinstance(obj, QWidget) and obj.isWindow():
+            self.scale_widgets(obj)
+        return super().eventFilter(obj, event)
+
+    def scale_widgets(self, window: QWidget) -> None:
+        """
+        Calculates the scale factor and applies scaling to the window's widgets.
+
+        Args:
+            window (QWidget): The window that was resized
+        """
+        # Record the window's first-seen width as its baseline
+        curr_width = window.width()
+        if window not in self.base_window_widths:
+            self.base_window_widths[window] = curr_width
+
+        # Compute the scale factor relative to the baseline width
+        base_width = self.base_window_widths[window]
+        scale = curr_width / base_width
+        self._apply_scaling(window=window, scale=scale)
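+
+    # Worked example of the baseline logic above (illustrative): a window
+    # first seen at 800 px wide and later resized to 1200 px yields
+    # scale = 1200 / 800 = 1.5; _apply_scaling() below then clamps the
+    # factor to the 0.5-2.0 range before touching fonts and geometry.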
+
+    def _apply_scaling(self, window: QWidget, scale: float) -> None:
+        """
+        Applies font and geometry scaling to all widgets in the window.
+
+        Args:
+            window (QWidget): The window containing widgets to scale
+            scale (float): The scaling factor to apply (1.0 = original size)
+        """
+        # Clamp scaling to 50%-200% of the original size
+        # (e.g. scale = 0.2 becomes 0.5, scale = 3.0 becomes 2.0)
+        scale = max(0.5, min(2.0, scale))
+
+        # Lazily initialize the storage dictionaries
+        if not hasattr(self, 'original_geometries'):
+            self.original_geometries = {}
+        if not hasattr(self, 'original_sizes'):
+            self.original_sizes = {}
+        if not hasattr(self, 'original_margins'):
+            self.original_margins = {}
+
+        # Scale the window's own layout first, if it has one
+        if window.layout():
+            self._scale_layout(window.layout(), scale)
+
+        # Iterate over every child widget of the window
+        for widget in window.findChildren(QWidget):
+
+            # -- ONLY FOR: scaling of QScrollArea -- #
+            if isinstance(widget, QScrollArea):
+
+                # Record the widget's original sizes on first sight
+                if widget not in self.original_sizes:
+                    self.original_sizes[widget] = {
+                        'min': widget.minimumSize(),
+                        'max': widget.maximumSize(),
+                        'size_hint': widget.sizeHint()
+                    }
+
+                # Scale the widget's minimum width and height
+                orig_sizes = self.original_sizes[widget]
+                if orig_sizes['min'].width() > 0:
+                    widget.setMinimumWidth(int(orig_sizes['min'].width() * scale))
+                if orig_sizes['min'].height() > 0:
+                    widget.setMinimumHeight(int(orig_sizes['min'].height() * scale))
+
+                # Also record the scroll area's content widget so its
+                # baseline size is known
+                if widget.widget():
+                    content_widget = widget.widget()
+                    if content_widget not in self.original_sizes:
+                        self.original_sizes[content_widget] = {
+                            'min': content_widget.minimumSize(),
+                            'size_hint': content_widget.sizeHint()
+                        }
+            # ----------------------------------------- #
+
+            # Record the widget's original font size on first sight
+            if widget not in self.original_font_size:
+                self.original_font_size[widget] = widget.font().pointSizeF()
+
+            # Scale and apply the widget's font
+            font = widget.font()
+            font.setPointSizeF(self.original_font_size[widget] * scale)
+            widget.setFont(font)
+
+            # Record the widget's original min/max sizes on first sight
+            if widget not in self.original_sizes:
+                self.original_sizes[widget] = {
+                    'min': widget.minimumSize(),
+                    'max': widget.maximumSize(),
+                    'size_hint': widget.sizeHint()
+                }
+
+            # Layout-managed widgets: apply scaled minimum and maximum sizes
+            # (16777215 is Qt's QWIDGETSIZE_MAX sentinel for "unbounded")
+            if widget.parent() and widget.parent().layout():
+                orig_sizes = self.original_sizes[widget]
+                if orig_sizes['min'].width() > 0:
+                    widget.setMinimumWidth(int(orig_sizes['min'].width() * scale))
+                if orig_sizes['min'].height() > 0:
+                    widget.setMinimumHeight(int(orig_sizes['min'].height() * scale))
+                if orig_sizes['max'].width() < 16777215:
+                    widget.setMaximumWidth(int(orig_sizes['max'].width() * scale))
+                if orig_sizes['max'].height() < 16777215:
+                    widget.setMaximumHeight(int(orig_sizes['max'].height() * scale))
+
+            # Widgets not managed by a layout are scaled via absolute geometry
+            else:
+                if widget not in self.original_geometries:
+                    self.original_geometries[widget] = widget.geometry()
+
+                # Scale the geometry
+                orig_geom = self.original_geometries[widget]
+                widget.setGeometry(
+                    int(orig_geom.x() * scale),
+                    int(orig_geom.y() * scale),
+                    int(orig_geom.width() * scale),
+                    int(orig_geom.height() * scale)
+                )
+
+            # Scale the widget's own layout spacing and margins
+            if widget.layout():
+                self._scale_layout(widget.layout(), scale)
+
+    def _scale_layout(self, layout, scale: float) -> None:
+        """
+        Scale spacing and margins of a layout proportionally.
+
+        Args:
+            layout: The layout object to scale (QLayout subclass)
+            scale (float): The scaling factor to apply to spacing and margins
+        """
+        # Store the original margins and spacing
+        if layout not in self.original_margins:
+            self.original_margins[layout] = {
+                'spacing': layout.spacing(),
+                'margins': layout.getContentsMargins()
+            }
+
+        # Scale and set the spacing
+        orig = self.original_margins[layout]
+        layout.setSpacing(int(orig['spacing'] * scale))
+
+        # Scale and set the margins
+        margins = orig['margins']
+        layout.setContentsMargins(
+            int(margins[0] * scale),
+            int(margins[1] * scale),
+            int(margins[2] * scale),
+            int(margins[3] * scale)
+        )
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 0000000..2fbd448
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,3324 @@
+version = 1
+revision = 3
+requires-python = "==3.11.*"
+resolution-markers = [
+    "sys_platform == 'darwin'",
+    "platform_machine == 'aarch64' and sys_platform == 'linux'",
+    "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')",
+]
+
+[[package]]
+name = "addict"
+version = "2.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/85/ef/fd7649da8af11d93979831e8f1f8097e85e82d5bfeabc8c68b39175d8e75/addict-2.4.0.tar.gz", hash = "sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494", size = 9186, upload-time = "2020-11-21T16:21:31.416Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/6a/00/b08f23b7d7e1e14ce01419a467b583edbb93c6cdb8654e54a9cc579cd61f/addict-2.4.0-py3-none-any.whl", hash = "sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc", size = 3832, upload-time = "2020-11-21T16:21:29.588Z" },
+]
+
+[[package]]
+name = "aiohappyeyeballs"
+version = "2.6.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" },
+]
+
+[[package]]
+name = "aiohttp"
+version = "3.13.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "aiohappyeyeballs" },
+    { name = "aiosignal" },
+    { name = "attrs" },
+    { name = "frozenlist" },
+    { name = "multidict" },
+    { name = "propcache" },
+    { name = "yarl" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/35/74/b321e7d7ca762638cdf8cdeceb39755d9c745aff7a64c8789be96ddf6e96/aiohttp-3.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4647d02df098f6434bafd7f32ad14942f05a9caa06c7016fdcc816f343997dd0", size = 743409, upload-time = "2025-10-28T20:56:00.354Z" },
+    { url = 
"https://files.pythonhosted.org/packages/99/3d/91524b905ec473beaf35158d17f82ef5a38033e5809fe8742e3657cdbb97/aiohttp-3.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e3403f24bcb9c3b29113611c3c16a2a447c3953ecf86b79775e7be06f7ae7ccb", size = 497006, upload-time = "2025-10-28T20:56:01.85Z" }, + { url = "https://files.pythonhosted.org/packages/eb/d3/7f68bc02a67716fe80f063e19adbd80a642e30682ce74071269e17d2dba1/aiohttp-3.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:43dff14e35aba17e3d6d5ba628858fb8cb51e30f44724a2d2f0c75be492c55e9", size = 493195, upload-time = "2025-10-28T20:56:03.314Z" }, + { url = "https://files.pythonhosted.org/packages/98/31/913f774a4708775433b7375c4f867d58ba58ead833af96c8af3621a0d243/aiohttp-3.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2a9ea08e8c58bb17655630198833109227dea914cd20be660f52215f6de5613", size = 1747759, upload-time = "2025-10-28T20:56:04.904Z" }, + { url = "https://files.pythonhosted.org/packages/e8/63/04efe156f4326f31c7c4a97144f82132c3bb21859b7bb84748d452ccc17c/aiohttp-3.13.2-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53b07472f235eb80e826ad038c9d106c2f653584753f3ddab907c83f49eedead", size = 1704456, upload-time = "2025-10-28T20:56:06.986Z" }, + { url = "https://files.pythonhosted.org/packages/8e/02/4e16154d8e0a9cf4ae76f692941fd52543bbb148f02f098ca73cab9b1c1b/aiohttp-3.13.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e736c93e9c274fce6419af4aac199984d866e55f8a4cec9114671d0ea9688780", size = 1807572, upload-time = "2025-10-28T20:56:08.558Z" }, + { url = "https://files.pythonhosted.org/packages/34/58/b0583defb38689e7f06798f0285b1ffb3a6fb371f38363ce5fd772112724/aiohttp-3.13.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff5e771f5dcbc81c64898c597a434f7682f2259e0cd666932a913d53d1341d1a", size = 1895954, upload-time = "2025-10-28T20:56:10.545Z" }, + { url = "https://files.pythonhosted.org/packages/6b/f3/083907ee3437425b4e376aa58b2c915eb1a33703ec0dc30040f7ae3368c6/aiohttp-3.13.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3b6fb0c207cc661fa0bf8c66d8d9b657331ccc814f4719468af61034b478592", size = 1747092, upload-time = "2025-10-28T20:56:12.118Z" }, + { url = "https://files.pythonhosted.org/packages/ac/61/98a47319b4e425cc134e05e5f3fc512bf9a04bf65aafd9fdcda5d57ec693/aiohttp-3.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97a0895a8e840ab3520e2288db7cace3a1981300d48babeb50e7425609e2e0ab", size = 1606815, upload-time = "2025-10-28T20:56:14.191Z" }, + { url = "https://files.pythonhosted.org/packages/97/4b/e78b854d82f66bb974189135d31fce265dee0f5344f64dd0d345158a5973/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9e8f8afb552297aca127c90cb840e9a1d4bfd6a10d7d8f2d9176e1acc69bad30", size = 1723789, upload-time = "2025-10-28T20:56:16.101Z" }, + { url = "https://files.pythonhosted.org/packages/ed/fc/9d2ccc794fc9b9acd1379d625c3a8c64a45508b5091c546dea273a41929e/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed2f9c7216e53c3df02264f25d824b079cc5914f9e2deba94155190ef648ee40", size = 1718104, upload-time = "2025-10-28T20:56:17.655Z" }, + { url = "https://files.pythonhosted.org/packages/66/65/34564b8765ea5c7d79d23c9113135d1dd3609173da13084830f1507d56cf/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:99c5280a329d5fa18ef30fd10c793a190d996567667908bef8a7f81f8202b948", size = 1785584, upload-time = "2025-10-28T20:56:19.238Z" }, + { url = "https://files.pythonhosted.org/packages/30/be/f6a7a426e02fc82781afd62016417b3948e2207426d90a0e478790d1c8a4/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ca6ffef405fc9c09a746cb5d019c1672cd7f402542e379afc66b370833170cf", size = 1595126, upload-time = "2025-10-28T20:56:20.836Z" }, + { url = "https://files.pythonhosted.org/packages/e5/c7/8e22d5d28f94f67d2af496f14a83b3c155d915d1fe53d94b66d425ec5b42/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:47f438b1a28e926c37632bff3c44df7d27c9b57aaf4e34b1def3c07111fdb782", size = 1800665, upload-time = "2025-10-28T20:56:22.922Z" }, + { url = "https://files.pythonhosted.org/packages/d1/11/91133c8b68b1da9fc16555706aa7276fdf781ae2bb0876c838dd86b8116e/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9acda8604a57bb60544e4646a4615c1866ee6c04a8edef9b8ee6fd1d8fa2ddc8", size = 1739532, upload-time = "2025-10-28T20:56:25.924Z" }, + { url = "https://files.pythonhosted.org/packages/17/6b/3747644d26a998774b21a616016620293ddefa4d63af6286f389aedac844/aiohttp-3.13.2-cp311-cp311-win32.whl", hash = "sha256:868e195e39b24aaa930b063c08bb0c17924899c16c672a28a65afded9c46c6ec", size = 431876, upload-time = "2025-10-28T20:56:27.524Z" }, + { url = "https://files.pythonhosted.org/packages/c3/63/688462108c1a00eb9f05765331c107f95ae86f6b197b865d29e930b7e462/aiohttp-3.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:7fd19df530c292542636c2a9a85854fab93474396a52f1695e799186bbd7f24c", size = 456205, upload-time = "2025-10-28T20:56:29.062Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, +] + +[[package]] +name = "anyio" +version = "4.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/78/7d432127c41b50bccba979505f272c16cbcadcc33645d5fa3a738110ae75/anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4", size = 219094, upload-time = "2025-09-23T09:19:12.58Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc", size = 109097, upload-time = "2025-09-23T09:19:10.601Z" }, +] + +[[package]] +name = "appnope" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/35/5d/752690df9ef5b76e169e68d6a129fa6d08a7100ca7f754c89495db3c6019/appnope-0.1.4.tar.gz", hash = 
"sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee", size = 4170, upload-time = "2024-02-06T09:43:11.258Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/29/5ecc3a15d5a33e31b26c11426c45c501e439cb865d0bff96315d86443b78/appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c", size = 4321, upload-time = "2024-02-06T09:43:09.663Z" }, +] + +[[package]] +name = "argon2-cffi" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "argon2-cffi-bindings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/89/ce5af8a7d472a67cc819d5d998aa8c82c5d860608c4db9f46f1162d7dab9/argon2_cffi-25.1.0.tar.gz", hash = "sha256:694ae5cc8a42f4c4e2bf2ca0e64e51e23a040c6a517a85074683d3959e1346c1", size = 45706, upload-time = "2025-06-03T06:55:32.073Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/d3/a8b22fa575b297cd6e3e3b0155c7e25db170edf1c74783d6a31a2490b8d9/argon2_cffi-25.1.0-py3-none-any.whl", hash = "sha256:fdc8b074db390fccb6eb4a3604ae7231f219aa669a2652e0f20e16ba513d5741", size = 14657, upload-time = "2025-06-03T06:55:30.804Z" }, +] + +[[package]] +name = "argon2-cffi-bindings" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/2d/db8af0df73c1cf454f71b2bbe5e356b8c1f8041c979f505b3d3186e520a9/argon2_cffi_bindings-25.1.0.tar.gz", hash = "sha256:b957f3e6ea4d55d820e40ff76f450952807013d361a65d7f28acc0acbf29229d", size = 1783441, upload-time = "2025-07-30T10:02:05.147Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/57/96b8b9f93166147826da5f90376e784a10582dd39a393c99bb62cfcf52f0/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:aecba1723ae35330a008418a91ea6cfcedf6d31e5fbaa056a166462ff066d500", size = 54121, upload-time = "2025-07-30T10:01:50.815Z" }, + { url = "https://files.pythonhosted.org/packages/0a/08/a9bebdb2e0e602dde230bdde8021b29f71f7841bd54801bcfd514acb5dcf/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2630b6240b495dfab90aebe159ff784d08ea999aa4b0d17efa734055a07d2f44", size = 29177, upload-time = "2025-07-30T10:01:51.681Z" }, + { url = "https://files.pythonhosted.org/packages/b6/02/d297943bcacf05e4f2a94ab6f462831dc20158614e5d067c35d4e63b9acb/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:7aef0c91e2c0fbca6fc68e7555aa60ef7008a739cbe045541e438373bc54d2b0", size = 31090, upload-time = "2025-07-30T10:01:53.184Z" }, + { url = "https://files.pythonhosted.org/packages/c1/93/44365f3d75053e53893ec6d733e4a5e3147502663554b4d864587c7828a7/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e021e87faa76ae0d413b619fe2b65ab9a037f24c60a1e6cc43457ae20de6dc6", size = 81246, upload-time = "2025-07-30T10:01:54.145Z" }, + { url = "https://files.pythonhosted.org/packages/09/52/94108adfdd6e2ddf58be64f959a0b9c7d4ef2fa71086c38356d22dc501ea/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e924cfc503018a714f94a49a149fdc0b644eaead5d1f089330399134fa028a", size = 87126, upload-time = "2025-07-30T10:01:55.074Z" }, + { url = "https://files.pythonhosted.org/packages/72/70/7a2993a12b0ffa2a9271259b79cc616e2389ed1a4d93842fac5a1f923ffd/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:c87b72589133f0346a1cb8d5ecca4b933e3c9b64656c9d175270a000e73b288d", size = 80343, upload-time = "2025-07-30T10:01:56.007Z" }, + { url = "https://files.pythonhosted.org/packages/78/9a/4e5157d893ffc712b74dbd868c7f62365618266982b64accab26bab01edc/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1db89609c06afa1a214a69a462ea741cf735b29a57530478c06eb81dd403de99", size = 86777, upload-time = "2025-07-30T10:01:56.943Z" }, + { url = "https://files.pythonhosted.org/packages/74/cd/15777dfde1c29d96de7f18edf4cc94c385646852e7c7b0320aa91ccca583/argon2_cffi_bindings-25.1.0-cp39-abi3-win32.whl", hash = "sha256:473bcb5f82924b1becbb637b63303ec8d10e84c8d241119419897a26116515d2", size = 27180, upload-time = "2025-07-30T10:01:57.759Z" }, + { url = "https://files.pythonhosted.org/packages/e2/c6/a759ece8f1829d1f162261226fbfd2c6832b3ff7657384045286d2afa384/argon2_cffi_bindings-25.1.0-cp39-abi3-win_amd64.whl", hash = "sha256:a98cd7d17e9f7ce244c0803cad3c23a7d379c301ba618a5fa76a67d116618b98", size = 31715, upload-time = "2025-07-30T10:01:58.56Z" }, + { url = "https://files.pythonhosted.org/packages/42/b9/f8d6fa329ab25128b7e98fd83a3cb34d9db5b059a9847eddb840a0af45dd/argon2_cffi_bindings-25.1.0-cp39-abi3-win_arm64.whl", hash = "sha256:b0fdbcf513833809c882823f98dc2f931cf659d9a1429616ac3adebb49f5db94", size = 27149, upload-time = "2025-07-30T10:01:59.329Z" }, +] + +[[package]] +name = "arrow" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/33/032cdc44182491aa708d06a68b62434140d8c50820a087fac7af37703357/arrow-1.4.0.tar.gz", hash = "sha256:ed0cc050e98001b8779e84d461b0098c4ac597e88704a655582b21d116e526d7", size = 152931, upload-time = "2025-10-18T17:46:46.761Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/c9/d7977eaacb9df673210491da99e6a247e93df98c715fc43fd136ce1d3d33/arrow-1.4.0-py3-none-any.whl", hash = "sha256:749f0769958ebdc79c173ff0b0670d59051a535fa26e8eba02953dc19eb43205", size = 68797, upload-time = "2025-10-18T17:46:45.663Z" }, +] + +[[package]] +name = "asteval" +version = "1.0.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/cf/82ccb7bc570ebfc5ad8357a6ead94492b0da845ab6b6fb4468fbb21118ca/asteval-1.0.7.tar.gz", hash = "sha256:729fa010511957afcbdbadb0304da09cef6904c145b6901e917afac2dd5668a1", size = 51859, upload-time = "2025-11-06T21:42:37.045Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/da/875925db2ed80dc7b919b2817da555848b608620be9662c5f835670d5d8d/asteval-1.0.7-py3-none-any.whl", hash = "sha256:d78df08681dfff59031ca624ba7030f9dc576a7a16e2f7a5137c6e7ef3ee60c4", size = 22089, upload-time = "2025-11-06T21:42:35.245Z" }, +] + +[[package]] +name = "asttokens" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978, upload-time = "2024-11-30T04:30:14.439Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918, upload-time = "2024-11-30T04:30:10.946Z" }, +] 
+ +[[package]] +name = "async-lru" +version = "2.0.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/4d/71ec4d3939dc755264f680f6c2b4906423a304c3d18e96853f0a595dfe97/async_lru-2.0.5.tar.gz", hash = "sha256:481d52ccdd27275f42c43a928b4a50c3bfb2d67af4e78b170e3e0bb39c66e5bb", size = 10380, upload-time = "2025-03-16T17:25:36.919Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/49/d10027df9fce941cb8184e78a02857af36360d33e1721df81c5ed2179a1a/async_lru-2.0.5-py3-none-any.whl", hash = "sha256:ab95404d8d2605310d345932697371a5f40def0487c03d6d0ad9138de52c9943", size = 6069, upload-time = "2025-03-16T17:25:35.422Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "babel" +version = "2.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.14.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/77/e9/df2358efd7659577435e2177bfa69cba6c33216681af51a707193dec162a/beautifulsoup4-4.14.2.tar.gz", hash = "sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e", size = 625822, upload-time = "2025-09-29T10:05:42.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/fe/3aed5d0be4d404d12d36ab97e2f1791424d9ca39c2f754a6285d59a3b01d/beautifulsoup4-4.14.2-py3-none-any.whl", hash = "sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515", size = 106392, upload-time = "2025-09-29T10:05:43.771Z" }, +] + +[[package]] +name = "bitshuffle" +version = "0.5.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cython" }, + { name = "h5py" }, + { name = "numpy" }, + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/34/d3/539ae1f2c7404e5396f90a9b4cca2e0d83ed1a9c8e598f94efe88130094a/bitshuffle-0.5.2.tar.gz", hash = "sha256:dc0e3fb7bdbf42be1009cc3028744180600d625a75b31833a24aa32aeaf83d8d", size = 290156, upload-time = "2024-09-26T19:04:10.899Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/b2/69/0c95cfa1f331c6196b7bc7746188ba929a963d33ce333824a4c86dcf0880/bitshuffle-0.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1f0aea223d92629f7319657ea4a3a88e95a6ee5be20e53c5038b766ea23502f", size = 11166576, upload-time = "2024-09-26T19:03:58.972Z" }, +] + +[[package]] +name = "bleach" +version = "6.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "webencodings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/07/18/3c8523962314be6bf4c8989c79ad9531c825210dd13a8669f6b84336e8bd/bleach-6.3.0.tar.gz", hash = "sha256:6f3b91b1c0a02bb9a78b5a454c92506aa0fdf197e1d5e114d2e00c6f64306d22", size = 203533, upload-time = "2025-10-27T17:57:39.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cd/3a/577b549de0cc09d95f11087ee63c739bba856cd3952697eec4c4bb91350a/bleach-6.3.0-py3-none-any.whl", hash = "sha256:fe10ec77c93ddf3d13a73b035abaac7a9f5e436513864ccdad516693213c65d6", size = 164437, upload-time = "2025-10-27T17:57:37.538Z" }, +] + +[package.optional-dependencies] +css = [ + { name = "tinycss2" }, +] + +[[package]] +name = "blinker" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/28/9b3f50ce0e048515135495f198351908d99540d69bfdc8c1d15b73dc55ce/blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf", size = 22460, upload-time = "2024-11-08T17:25:47.436Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc", size = 8458, upload-time = "2024-11-08T17:25:46.184Z" }, +] + +[[package]] +name = "blosc2" +version = "3.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "msgpack" }, + { name = "ndindex" }, + { name = "numexpr", marker = "platform_machine != 'wasm32'" }, + { name = "numpy" }, + { name = "platformdirs" }, + { name = "py-cpuinfo", marker = "platform_machine != 'wasm32'" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4d/da/6abdd7d7d9eec18a2c7f7f48a652c8679a78fc39b3b6c6723e933a65ce5f/blosc2-3.11.0.tar.gz", hash = "sha256:458a29304899000658f43411e2bfe08f6f1ea4feb4c82909002ff1b066a11886", size = 3972012, upload-time = "2025-10-28T18:03:24.305Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/56/27a98fb2774e75022a69ddf39ab59149723e58648186138db709e83a0c34/blosc2-3.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:68f241de958e88d33c99620b59069612b5f9cafa7108e67f1b937a6333d0b622", size = 3962054, upload-time = "2025-10-28T18:02:45.452Z" }, + { url = "https://files.pythonhosted.org/packages/81/54/0660b7c44905500e4fcbea7fa99594283efef35b4250dcb6cad2588f44cb/blosc2-3.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d1c815f23d18dc97eecc4ba096ef0c1274efb9b198c6561d757642fc9ae674c", size = 3463769, upload-time = "2025-10-28T18:02:46.642Z" }, + { url = "https://files.pythonhosted.org/packages/74/a1/c5652459e8fccc56a8875133abce4b86e5832ef2e5d7f9ea5521025b5d96/blosc2-3.11.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2ea6d3303d031a4458148acef5512b70d416860ec9739fd7cb79871b6ec063ed", size = 4386624, upload-time = "2025-10-28T18:02:47.837Z" }, + { url = 
"https://files.pythonhosted.org/packages/be/d4/ed900b60558767c96ce9c5ed3352589f2316501b5f2e51b19c5efa8bd310/blosc2-3.11.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aeef35f0138544de195fd742a667ae81f5437f97847827c70e72f5b55eb3b2fe", size = 4520796, upload-time = "2025-10-28T18:02:49.17Z" }, + { url = "https://files.pythonhosted.org/packages/8f/84/18573da56a95e3c93ce256d4e2404efc560dc862dcd598a1bd7d6ba3ac8a/blosc2-3.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:4d42f3d2c24d5bdbe0827647e4a7667b074c82e406343e70f7123ff346750af7", size = 2284512, upload-time = "2025-10-28T18:02:50.732Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" }, + { url = "https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" }, + { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" }, + { url = "https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = "2025-09-08T23:22:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" }, + { url = "https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" }, + { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/27/c6491ff4954e58a10f69ad90aca8a1b6fe9c5d3c6f380907af3c37435b59/charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8", size = 206988, upload-time = "2025-10-14T04:40:33.79Z" }, + { url = "https://files.pythonhosted.org/packages/94/59/2e87300fe67ab820b5428580a53cad894272dbb97f38a7a814a2a1ac1011/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0", size = 147324, upload-time 
= "2025-10-14T04:40:34.961Z" }, + { url = "https://files.pythonhosted.org/packages/07/fb/0cf61dc84b2b088391830f6274cb57c82e4da8bbc2efeac8c025edb88772/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3", size = 142742, upload-time = "2025-10-14T04:40:36.105Z" }, + { url = "https://files.pythonhosted.org/packages/62/8b/171935adf2312cd745d290ed93cf16cf0dfe320863ab7cbeeae1dcd6535f/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc", size = 160863, upload-time = "2025-10-14T04:40:37.188Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/ad875b192bda14f2173bfc1bc9a55e009808484a4b256748d931b6948442/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897", size = 157837, upload-time = "2025-10-14T04:40:38.435Z" }, + { url = "https://files.pythonhosted.org/packages/6d/fc/de9cce525b2c5b94b47c70a4b4fb19f871b24995c728e957ee68ab1671ea/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381", size = 151550, upload-time = "2025-10-14T04:40:40.053Z" }, + { url = "https://files.pythonhosted.org/packages/55/c2/43edd615fdfba8c6f2dfbd459b25a6b3b551f24ea21981e23fb768503ce1/charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815", size = 149162, upload-time = "2025-10-14T04:40:41.163Z" }, + { url = "https://files.pythonhosted.org/packages/03/86/bde4ad8b4d0e9429a4e82c1e8f5c659993a9a863ad62c7df05cf7b678d75/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0", size = 150019, upload-time = "2025-10-14T04:40:42.276Z" }, + { url = "https://files.pythonhosted.org/packages/1f/86/a151eb2af293a7e7bac3a739b81072585ce36ccfb4493039f49f1d3cae8c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161", size = 143310, upload-time = "2025-10-14T04:40:43.439Z" }, + { url = "https://files.pythonhosted.org/packages/b5/fe/43dae6144a7e07b87478fdfc4dbe9efd5defb0e7ec29f5f58a55aeef7bf7/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4", size = 162022, upload-time = "2025-10-14T04:40:44.547Z" }, + { url = "https://files.pythonhosted.org/packages/80/e6/7aab83774f5d2bca81f42ac58d04caf44f0cc2b65fc6db2b3b2e8a05f3b3/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89", size = 149383, upload-time = "2025-10-14T04:40:46.018Z" }, + { url = "https://files.pythonhosted.org/packages/4f/e8/b289173b4edae05c0dde07f69f8db476a0b511eac556dfe0d6bda3c43384/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569", size = 159098, upload-time = "2025-10-14T04:40:47.081Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/df/fe699727754cae3f8478493c7f45f777b17c3ef0600e28abfec8619eb49c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224", size = 152991, upload-time = "2025-10-14T04:40:48.246Z" }, + { url = "https://files.pythonhosted.org/packages/1a/86/584869fe4ddb6ffa3bd9f491b87a01568797fb9bd8933f557dba9771beaf/charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a", size = 99456, upload-time = "2025-10-14T04:40:49.376Z" }, + { url = "https://files.pythonhosted.org/packages/65/f6/62fdd5feb60530f50f7e38b4f6a1d5203f4d16ff4f9f0952962c044e919a/charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016", size = 106978, upload-time = "2025-10-14T04:40:50.844Z" }, + { url = "https://files.pythonhosted.org/packages/7a/9d/0710916e6c82948b3be62d9d398cb4fcf4e97b56d6a6aeccd66c4b2f2bd5/charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1", size = 99969, upload-time = "2025-10-14T04:40:52.272Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/61/de6cd827efad202d7057d93e0fed9294b96952e188f7384832791c7b2254/click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4", size = 276943, upload-time = "2025-09-18T17:32:23.696Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/d3/9dcc0f5797f070ec8edf30fbadfb200e71d9db6b84d211e3b2085a7589a0/click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc", size = 107295, upload-time = "2025-09-18T17:32:22.42Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "coloredlogs" +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "humanfriendly" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cc/c7/eed8f27100517e8c0e6b923d5f0845d0cb99763da6fdee00478f91db7325/coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0", size = 278520, upload-time = "2021-06-11T10:22:45.202Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/a7/06/3d6badcf13db419e25b07041d9c7b4a2c331d3f4e7134445ec5df57714cd/coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934", size = 46018, upload-time = "2021-06-11T10:22:42.561Z" }, +] + +[[package]] +name = "comm" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/13/7d740c5849255756bc17888787313b61fd38a0a8304fc4f073dfc46122aa/comm-0.2.3.tar.gz", hash = "sha256:2dc8048c10962d55d7ad693be1e7045d891b7ce8d999c97963a5e3e99c055971", size = 6319, upload-time = "2025-07-25T14:02:04.452Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/97/891a0971e1e4a8c5d2b20bbe0e524dc04548d2307fee33cdeba148fd4fc7/comm-0.2.3-py3-none-any.whl", hash = "sha256:c615d91d75f7f04f095b30d1c1711babd43bdc6419c1be9886a85f2f4e489417", size = 7294, upload-time = "2025-07-25T14:02:02.896Z" }, +] + +[[package]] +name = "configargparse" +version = "1.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/85/4d/6c9ef746dfcc2a32e26f3860bb4a011c008c392b83eabdfb598d1a8bbe5d/configargparse-1.7.1.tar.gz", hash = "sha256:79c2ddae836a1e5914b71d58e4b9adbd9f7779d4e6351a637b7d2d9b6c46d3d9", size = 43958, upload-time = "2025-05-23T14:26:17.369Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/28/d28211d29bcc3620b1fece85a65ce5bb22f18670a03cd28ea4b75ede270c/configargparse-1.7.1-py3-none-any.whl", hash = "sha256:8b586a31f9d873abd1ca527ffbe58863c99f36d896e2829779803125e83be4b6", size = 25607, upload-time = "2025-05-23T14:26:15.923Z" }, +] + +[[package]] +name = "contourpy" +version = "1.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/01/1253e6698a07380cd31a736d248a3f2a50a7c88779a1813da27503cadc2a/contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880", size = 13466174, upload-time = "2025-07-26T12:03:12.549Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/2e/c4390a31919d8a78b90e8ecf87cd4b4c4f05a5b48d05ec17db8e5404c6f4/contourpy-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:709a48ef9a690e1343202916450bc48b9e51c049b089c7f79a267b46cffcdaa1", size = 288773, upload-time = "2025-07-26T12:01:02.277Z" }, + { url = "https://files.pythonhosted.org/packages/0d/44/c4b0b6095fef4dc9c420e041799591e3b63e9619e3044f7f4f6c21c0ab24/contourpy-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:23416f38bfd74d5d28ab8429cc4d63fa67d5068bd711a85edb1c3fb0c3e2f381", size = 270149, upload-time = "2025-07-26T12:01:04.072Z" }, + { url = "https://files.pythonhosted.org/packages/30/2e/dd4ced42fefac8470661d7cb7e264808425e6c5d56d175291e93890cce09/contourpy-1.3.3-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:929ddf8c4c7f348e4c0a5a3a714b5c8542ffaa8c22954862a46ca1813b667ee7", size = 329222, upload-time = "2025-07-26T12:01:05.688Z" }, + { url = "https://files.pythonhosted.org/packages/f2/74/cc6ec2548e3d276c71389ea4802a774b7aa3558223b7bade3f25787fafc2/contourpy-1.3.3-cp311-cp311-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9e999574eddae35f1312c2b4b717b7885d4edd6cb46700e04f7f02db454e67c1", size = 377234, upload-time = "2025-07-26T12:01:07.054Z" }, + { url = 
"https://files.pythonhosted.org/packages/03/b3/64ef723029f917410f75c09da54254c5f9ea90ef89b143ccadb09df14c15/contourpy-1.3.3-cp311-cp311-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf67e0e3f482cb69779dd3061b534eb35ac9b17f163d851e2a547d56dba0a3a", size = 380555, upload-time = "2025-07-26T12:01:08.801Z" }, + { url = "https://files.pythonhosted.org/packages/5f/4b/6157f24ca425b89fe2eb7e7be642375711ab671135be21e6faa100f7448c/contourpy-1.3.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51e79c1f7470158e838808d4a996fa9bac72c498e93d8ebe5119bc1e6becb0db", size = 355238, upload-time = "2025-07-26T12:01:10.319Z" }, + { url = "https://files.pythonhosted.org/packages/98/56/f914f0dd678480708a04cfd2206e7c382533249bc5001eb9f58aa693e200/contourpy-1.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:598c3aaece21c503615fd59c92a3598b428b2f01bfb4b8ca9c4edeecc2438620", size = 1326218, upload-time = "2025-07-26T12:01:12.659Z" }, + { url = "https://files.pythonhosted.org/packages/fb/d7/4a972334a0c971acd5172389671113ae82aa7527073980c38d5868ff1161/contourpy-1.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:322ab1c99b008dad206d406bb61d014cf0174df491ae9d9d0fac6a6fda4f977f", size = 1392867, upload-time = "2025-07-26T12:01:15.533Z" }, + { url = "https://files.pythonhosted.org/packages/75/3e/f2cc6cd56dc8cff46b1a56232eabc6feea52720083ea71ab15523daab796/contourpy-1.3.3-cp311-cp311-win32.whl", hash = "sha256:fd907ae12cd483cd83e414b12941c632a969171bf90fc937d0c9f268a31cafff", size = 183677, upload-time = "2025-07-26T12:01:17.088Z" }, + { url = "https://files.pythonhosted.org/packages/98/4b/9bd370b004b5c9d8045c6c33cf65bae018b27aca550a3f657cdc99acdbd8/contourpy-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:3519428f6be58431c56581f1694ba8e50626f2dd550af225f82fb5f5814d2a42", size = 225234, upload-time = "2025-07-26T12:01:18.256Z" }, + { url = "https://files.pythonhosted.org/packages/d9/b6/71771e02c2e004450c12b1120a5f488cad2e4d5b590b1af8bad060360fe4/contourpy-1.3.3-cp311-cp311-win_arm64.whl", hash = "sha256:15ff10bfada4bf92ec8b31c62bf7c1834c244019b4a33095a68000d7075df470", size = 193123, upload-time = "2025-07-26T12:01:19.848Z" }, + { url = "https://files.pythonhosted.org/packages/a5/29/8dcfe16f0107943fa92388c23f6e05cff0ba58058c4c95b00280d4c75a14/contourpy-1.3.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cd5dfcaeb10f7b7f9dc8941717c6c2ade08f587be2226222c12b25f0483ed497", size = 278809, upload-time = "2025-07-26T12:02:52.74Z" }, + { url = "https://files.pythonhosted.org/packages/85/a9/8b37ef4f7dafeb335daee3c8254645ef5725be4d9c6aa70b50ec46ef2f7e/contourpy-1.3.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:0c1fc238306b35f246d61a1d416a627348b5cf0648648a031e14bb8705fcdfe8", size = 261593, upload-time = "2025-07-26T12:02:54.037Z" }, + { url = "https://files.pythonhosted.org/packages/0a/59/ebfb8c677c75605cc27f7122c90313fd2f375ff3c8d19a1694bda74aaa63/contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70f9aad7de812d6541d29d2bbf8feb22ff7e1c299523db288004e3157ff4674e", size = 302202, upload-time = "2025-07-26T12:02:55.947Z" }, + { url = "https://files.pythonhosted.org/packages/3c/37/21972a15834d90bfbfb009b9d004779bd5a07a0ec0234e5ba8f64d5736f4/contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ed3657edf08512fc3fe81b510e35c2012fbd3081d2e26160f27ca28affec989", size = 329207, upload-time = "2025-07-26T12:02:57.468Z" }, + { url = 
"https://files.pythonhosted.org/packages/0c/58/bd257695f39d05594ca4ad60df5bcb7e32247f9951fd09a9b8edb82d1daa/contourpy-1.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:3d1a3799d62d45c18bafd41c5fa05120b96a28079f2393af559b843d1a966a77", size = 225315, upload-time = "2025-07-26T12:02:58.801Z" }, +] + +[[package]] +name = "cycler" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615, upload-time = "2023-10-07T05:32:18.335Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321, upload-time = "2023-10-07T05:32:16.783Z" }, +] + +[[package]] +name = "cython" +version = "3.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/36/cce2972e13e83ffe58bc73bfd9d37340b5e5113e8243841a57511c7ae1c2/cython-3.2.1.tar.gz", hash = "sha256:2be1e4d0cbdf7f4cd4d9b8284a034e1989b59fd060f6bd4d24bf3729394d2ed8", size = 3270455, upload-time = "2025-11-12T19:02:59.847Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/d4/ba7b9f341ec168de78bd659600e04bb7de3b2d069bf98b2178a135e88ea4/cython-3.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3cb32c650e7f4476941d1f735cae75a2067d5e3279576273bb8802e8ea907222", size = 2949720, upload-time = "2025-11-12T19:03:17.492Z" }, + { url = "https://files.pythonhosted.org/packages/ad/47/c42417f424c0b928361f48d7dd0ae72716ee21f647b73ceb16f66b98663e/cython-3.2.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a2b306813d7f28aa0a2c3e4e63ada1427a8109917532df942cd5429db228252", size = 3242127, upload-time = "2025-11-12T19:03:19.227Z" }, + { url = "https://files.pythonhosted.org/packages/e6/fc/1040460889129551649ec35be45e05169871fbcf71bd8e13c533e86f9468/cython-3.2.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0959d9a36d4f004ce63acc1474b3c606745af98b65e8ae709efd0c10988e9d6b", size = 3377094, upload-time = "2025-11-12T19:03:21.25Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f2/8c754298eefa40e21af0ae3592837c6e71254900d5aea1c8859e96b11de5/cython-3.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:60c62e734421365135cc2842013d883136054a26c617c001be494235edfc447a", size = 2767824, upload-time = "2025-11-12T19:03:23.317Z" }, + { url = "https://files.pythonhosted.org/packages/5a/7e/1194f4ba98b981bbdca945a292e4f49e87ea09d69516b24445409e7cf611/cython-3.2.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:4e9167316bf6ecfea33dcca62f074605648fb93cc053ef46b5deb3e5d12fc0d3", size = 2872858, upload-time = "2025-11-12T19:03:55.074Z" }, + { url = "https://files.pythonhosted.org/packages/6b/1a/393ca8ffec7ad3f02b8e4bffaba3dba4fb62c4a1c4c0b6dbf3b80e709fe3/cython-3.2.1-cp39-abi3-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3095df6cd470064742f428c937bed7200c5123b9e19ee04aa09ec61281e565a3", size = 3209664, upload-time = "2025-11-12T19:03:56.771Z" }, + { url = 
"https://files.pythonhosted.org/packages/37/57/f209f64c609d3d8fac60a572e56da2f621dc1789e399c58db61d5645a31f/cython-3.2.1-cp39-abi3-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:db3f53b2d9afb206075a2605f1150aa019f0733c7795a38eccc6119c2e9c3f7b", size = 2854607, upload-time = "2025-11-12T19:03:59.413Z" }, + { url = "https://files.pythonhosted.org/packages/fc/af/1e5c73fe52423f40776130b0be914fd9f9f8dc26c4f6ea4c2ed04772d558/cython-3.2.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0fc5e7687ac8f8e2b2fb95648f43e9e074ebaa72fd5cb3d8e20e5f1e8b8e02d9", size = 2991567, upload-time = "2025-11-12T19:04:02.209Z" }, + { url = "https://files.pythonhosted.org/packages/39/2c/3ea175b6b1fdfb429f9e9c395240d894155b3c0615caced05fef43264cba/cython-3.2.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:bbb3bc152bc0de82b031c8d355418fa4890a92424209d59366c2c0bc9e6cf53c", size = 2889178, upload-time = "2025-11-12T19:04:05.272Z" }, + { url = "https://files.pythonhosted.org/packages/f1/88/b2ab22a3a3feac78c62354a823c5c0c33659909e9918f53aa05904532b4b/cython-3.2.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:a2022bc48ad0c2c0e0485bf0b54902913a3d81086b7d435f4437620c667799f6", size = 3223755, upload-time = "2025-11-12T19:04:07.262Z" }, + { url = "https://files.pythonhosted.org/packages/0b/56/9ba58629a03cbffb5965a3c65ccd91fa683d95d588c21a875da72fdc249b/cython-3.2.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:99fdd4ffc2dcb513f4be9ce71c6fedd895b96b1f814655b6bbab196df497b090", size = 3113456, upload-time = "2025-11-12T19:04:09.175Z" }, + { url = "https://files.pythonhosted.org/packages/56/5b/148c1a7ea5aebe460a70cad716a77e5fd0205be2de9fc5250491eb13ad8c/cython-3.2.1-cp39-abi3-win32.whl", hash = "sha256:06071f85bd5ce040464d43b2f9f287742a79f905e81b709fe904567230f1ed51", size = 2434223, upload-time = "2025-11-12T19:04:11.294Z" }, + { url = "https://files.pythonhosted.org/packages/7a/54/bb9b0c9db2a92a5e93747ca3027cfc645741411f8f1c6af2fb2a7b82df5d/cython-3.2.1-cp39-abi3-win_arm64.whl", hash = "sha256:e87c131d59480aee1ebac622b64f287c0e1d665ad1a1b7d498ac48accdb36c6b", size = 2439268, upload-time = "2025-11-12T19:04:12.931Z" }, + { url = "https://files.pythonhosted.org/packages/aa/30/373775b8d933d781d055c1dd0f110f275a101f320dab724c8c63a7c1b945/cython-3.2.1-py3-none-any.whl", hash = "sha256:cd72c46e7bffe8250c52d400e72c8d5d3086437b6aeec5b0eca99ccd337f5834", size = 1254219, upload-time = "2025-11-12T19:02:56.14Z" }, +] + +[[package]] +name = "dash" +version = "3.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "flask" }, + { name = "importlib-metadata" }, + { name = "nest-asyncio" }, + { name = "plotly" }, + { name = "requests" }, + { name = "retrying" }, + { name = "setuptools" }, + { name = "typing-extensions" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e2/f9/516671861cf190bda37f6afa696d8a6a6ac593f23d8cf198e16faca044f5/dash-3.3.0.tar.gz", hash = "sha256:eaaa7a671540b5e1db8066f4966d0277d21edc2c7acdaec2fd6d198366a8b0df", size = 7579436, upload-time = "2025-11-12T15:51:54.919Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/cf/a4853e5b2b2bea55ae909095a8720b3ed50d07bdd40cbeafcedb5a6c47da/dash-3.3.0-py3-none-any.whl", hash = "sha256:8f52415977f7490492dd8a3872279160be8ff253ca9f4d49a4e3ba747fa4bd91", size = 7919707, upload-time = "2025-11-12T15:51:47.432Z" }, +] + +[[package]] +name = "dash-core-components" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { 
url = "https://files.pythonhosted.org/packages/41/55/ad4a2cf9b7d4134779bd8d3a7e5b5f8cc757f421809e07c3e73bb374fdd7/dash_core_components-2.0.0.tar.gz", hash = "sha256:c6733874af975e552f95a1398a16c2ee7df14ce43fa60bb3718a3c6e0b63ffee", size = 3427, upload-time = "2021-09-03T17:11:19.342Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/9e/a29f726e84e531a36d56cff187e61d8c96d2cc253c5bcef9a7695acb7e6a/dash_core_components-2.0.0-py3-none-any.whl", hash = "sha256:52b8e8cce13b18d0802ee3acbc5e888cb1248a04968f962d63d070400af2e346", size = 3822, upload-time = "2022-03-02T16:50:30.899Z" }, +] + +[[package]] +name = "dash-html-components" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/14/c6/957d5e83b620473eb3c8557a253fb01c6a817b10ca43d3ff9d31796f32a6/dash_html_components-2.0.0.tar.gz", hash = "sha256:8703a601080f02619a6390998e0b3da4a5daabe97a1fd7a9cebc09d015f26e50", size = 3840, upload-time = "2021-09-03T17:15:28.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/75/65/1b16b853844ef59b2742a7de74a598f376ac0ab581f0dcc34db294e5c90e/dash_html_components-2.0.0-py3-none-any.whl", hash = "sha256:b42cc903713c9706af03b3f2548bda4be7307a7cf89b7d6eae3da872717d1b63", size = 4092, upload-time = "2022-03-02T16:56:07.734Z" }, +] + +[[package]] +name = "dash-table" +version = "5.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3a/81/34983fa0c67125d7fff9d55e5d1a065127bde7ca49ca32d04dedd55f9f35/dash_table-5.0.0.tar.gz", hash = "sha256:18624d693d4c8ef2ddec99a6f167593437a7ea0bf153aa20f318c170c5bc7308", size = 3391, upload-time = "2021-09-03T17:22:17.114Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/ce/43f77dc8e7bbad02a9f88d07bf794eaf68359df756a28bb9f2f78e255bb1/dash_table-5.0.0-py3-none-any.whl", hash = "sha256:19036fa352bb1c11baf38068ec62d172f0515f73ca3276c79dee49b95ddc16c9", size = 3912, upload-time = "2022-03-02T17:10:41.401Z" }, +] + +[[package]] +name = "dashpva" +version = "0.1.0" +source = { editable = "." 
} +dependencies = [ + { name = "addict" }, + { name = "bitshuffle" }, + { name = "blosc2" }, + { name = "click" }, + { name = "configargparse" }, + { name = "dash" }, + { name = "dash-core-components" }, + { name = "dash-html-components" }, + { name = "dash-table" }, + { name = "deepinv" }, + { name = "fabio" }, + { name = "flask" }, + { name = "h5py" }, + { name = "hdf5plugin" }, + { name = "ipympl" }, + { name = "ipython" }, + { name = "ipywidgets" }, + { name = "joblib" }, + { name = "jupyter" }, + { name = "jupyter-core" }, + { name = "jupyterlab-widgets" }, + { name = "lmfit" }, + { name = "lz4" }, + { name = "markdown" }, + { name = "matplotlib" }, + { name = "numexpr" }, + { name = "numpy" }, + { name = "onnx" }, + { name = "onnxruntime" }, + { name = "open3d" }, + { name = "opencv-python" }, + { name = "pandas" }, + { name = "pillow" }, + { name = "pvapy" }, + { name = "pycocotools" }, + { name = "pyepics" }, + { name = "pyqt5" }, + { name = "pyqtgraph" }, + { name = "pyquaternion" }, + { name = "pytz" }, + { name = "pyvista", extra = ["jupyter"] }, + { name = "pyvistaqt" }, + { name = "pyyaml" }, + { name = "qtawesome" }, + { name = "qtpy" }, + { name = "requests" }, + { name = "scikit-image" }, + { name = "scikit-learn" }, + { name = "scipy" }, + { name = "segment-anything" }, + { name = "sqlalchemy" }, + { name = "superqt" }, + { name = "toml" }, + { name = "torch" }, + { name = "torchvision" }, + { name = "tqdm" }, + { name = "trame" }, + { name = "uncertainties" }, + { name = "xrayutilities" }, +] + +[package.metadata] +requires-dist = [ + { name = "addict", specifier = ">=2.4.0" }, + { name = "bitshuffle", specifier = ">=0.5.0" }, + { name = "blosc2", specifier = ">=3.2.0" }, + { name = "click", specifier = ">=8.1.7" }, + { name = "configargparse", specifier = ">=1.7" }, + { name = "dash", specifier = ">=2.18.0" }, + { name = "dash-core-components", specifier = ">=2.0.0" }, + { name = "dash-html-components", specifier = ">=2.0.0" }, + { name = "dash-table", specifier = ">=5.0.0" }, + { name = "deepinv", specifier = ">=0.3.7" }, + { name = "fabio", specifier = ">=2024.0.0" }, + { name = "flask", specifier = ">=3.1.0" }, + { name = "h5py", specifier = ">=3.12.0" }, + { name = "hdf5plugin", specifier = ">=4.0.0" }, + { name = "ipympl", specifier = ">=0.9.8" }, + { name = "ipython", specifier = ">=9.0.0" }, + { name = "ipywidgets", specifier = ">=8.1.0" }, + { name = "joblib", specifier = ">=1.4.0" }, + { name = "jupyter", specifier = ">=1.1.1" }, + { name = "jupyter-core", specifier = ">=5.7.0" }, + { name = "jupyterlab-widgets", specifier = ">=3.0.0" }, + { name = "lmfit", specifier = ">=1.3.0" }, + { name = "lz4", specifier = ">=4.4.0" }, + { name = "markdown", specifier = ">=3.10" }, + { name = "matplotlib", specifier = ">=3.10.0" }, + { name = "numexpr", specifier = ">=2.10.0" }, + { name = "numpy", specifier = ">=2.2.0" }, + { name = "onnx", specifier = ">=1.20.0" }, + { name = "onnxruntime", specifier = ">=1.23.2" }, + { name = "open3d", specifier = ">=0.19.0" }, + { name = "opencv-python", specifier = ">=4.8.0" }, + { name = "pandas", specifier = ">=2.2.0" }, + { name = "pillow", specifier = ">=12.0.0" }, + { name = "pvapy", specifier = ">=5.5.0" }, + { name = "pycocotools", specifier = ">=2.0.11" }, + { name = "pyepics", specifier = ">=3.5.0" }, + { name = "pyqt5", specifier = ">=5.15.0" }, + { name = "pyqtgraph", specifier = ">=0.13.0" }, + { name = "pyquaternion", specifier = ">=0.9.0" }, + { name = "pytz", specifier = ">=2025.1" }, + { name = "pyvista", extras = 
["jupyter"], specifier = ">=0.44.0" }, + { name = "pyvistaqt", specifier = ">=0.11.0" }, + { name = "pyyaml", specifier = ">=6.0.0" }, + { name = "qtawesome", specifier = ">=1.4.0" }, + { name = "qtpy", specifier = ">=2.4.0" }, + { name = "requests", specifier = ">=2.32.0" }, + { name = "scikit-image", specifier = ">=0.22.0" }, + { name = "scikit-learn", specifier = ">=1.6.0" }, + { name = "scipy", specifier = ">=1.15.0" }, + { name = "segment-anything", git = "https://github.com/facebookresearch/segment-anything.git" }, + { name = "sqlalchemy", specifier = ">=2.0.44" }, + { name = "superqt", specifier = ">=0.6.7" }, + { name = "toml", specifier = ">=0.10.0" }, + { name = "torch", specifier = ">=2.9.1" }, + { name = "torchvision", specifier = ">=0.24.1" }, + { name = "tqdm", specifier = ">=4.67.0" }, + { name = "trame", specifier = ">=3.12.0" }, + { name = "uncertainties", specifier = ">=3.2.0" }, + { name = "xrayutilities", specifier = ">=1.7.0" }, +] + +[[package]] +name = "debugpy" +version = "1.8.19" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/75/9e12d4d42349b817cd545b89247696c67917aab907012ae5b64bbfea3199/debugpy-1.8.19.tar.gz", hash = "sha256:eea7e5987445ab0b5ed258093722d5ecb8bb72217c5c9b1e21f64efe23ddebdb", size = 1644590, upload-time = "2025-12-15T21:53:28.044Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/e2/48531a609b5a2aa94c6b6853afdfec8da05630ab9aaa96f1349e772119e9/debugpy-1.8.19-cp311-cp311-macosx_15_0_universal2.whl", hash = "sha256:c5dcfa21de1f735a4f7ced4556339a109aa0f618d366ede9da0a3600f2516d8b", size = 2207620, upload-time = "2025-12-15T21:53:37.1Z" }, + { url = "https://files.pythonhosted.org/packages/1b/d4/97775c01d56071969f57d93928899e5616a4cfbbf4c8cc75390d3a51c4a4/debugpy-1.8.19-cp311-cp311-manylinux_2_34_x86_64.whl", hash = "sha256:806d6800246244004625d5222d7765874ab2d22f3ba5f615416cf1342d61c488", size = 3170796, upload-time = "2025-12-15T21:53:38.513Z" }, + { url = "https://files.pythonhosted.org/packages/8d/7e/8c7681bdb05be9ec972bbb1245eb7c4c7b0679bb6a9e6408d808bc876d3d/debugpy-1.8.19-cp311-cp311-win32.whl", hash = "sha256:783a519e6dfb1f3cd773a9bda592f4887a65040cb0c7bd38dde410f4e53c40d4", size = 5164287, upload-time = "2025-12-15T21:53:40.857Z" }, + { url = "https://files.pythonhosted.org/packages/f2/a8/aaac7ff12ddf5d68a39e13a423a8490426f5f661384f5ad8d9062761bd8e/debugpy-1.8.19-cp311-cp311-win_amd64.whl", hash = "sha256:14035cbdbb1fe4b642babcdcb5935c2da3b1067ac211c5c5a8fdc0bb31adbcaa", size = 5188269, upload-time = "2025-12-15T21:53:42.359Z" }, + { url = "https://files.pythonhosted.org/packages/25/3e/e27078370414ef35fafad2c06d182110073daaeb5d3bf734b0b1eeefe452/debugpy-1.8.19-py2.py3-none-any.whl", hash = "sha256:360ffd231a780abbc414ba0f005dad409e71c78637efe8f2bd75837132a41d38", size = 5292321, upload-time = "2025-12-15T21:54:16.024Z" }, +] + +[[package]] +name = "decorator" +version = "5.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 
9190, upload-time = "2025-02-24T04:41:32.565Z" }, +] + +[[package]] +name = "deepinv" +version = "0.3.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "einops" }, + { name = "h5py" }, + { name = "matplotlib" }, + { name = "natsort" }, + { name = "numpy" }, + { name = "requests" }, + { name = "torch" }, + { name = "torchmetrics" }, + { name = "torchvision" }, + { name = "tqdm" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/87/a1/79dfe9b58c0c2ee05e5fdd5383f0cf6dbb1c55e0d28c2f21ab558a5b687c/deepinv-0.3.7.tar.gz", hash = "sha256:74630502320c0d22757a5bb1ea1c0035b47f77f86d553b2b48850fa388281b23", size = 677972, upload-time = "2025-12-15T12:14:51.887Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/97/21928458bcd04cf3e92d41998c0f64e98331644687006010187fb0a4cb1a/deepinv-0.3.7-py3-none-any.whl", hash = "sha256:26a4b292026d13958485947a47f9ad27edc55a90bbc02a24bdc9d887182650c6", size = 850394, upload-time = "2025-12-15T12:14:50.061Z" }, +] + +[[package]] +name = "defusedxml" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/d5/c66da9b79e5bdb124974bfe172b4daf3c984ebd9c2a06e2b8a4dc7331c72/defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69", size = 75520, upload-time = "2021-03-08T10:59:26.269Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/6c/aa3f2f849e01cb6a001cd8554a88d4c77c5c1a31c95bdf1cf9301e6d9ef4/defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61", size = 25604, upload-time = "2021-03-08T10:59:24.45Z" }, +] + +[[package]] +name = "dill" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/80/630b4b88364e9a8c8c5797f4602d0f76ef820909ee32f0bacb9f90654042/dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", size = 186976, upload-time = "2025-04-16T00:41:48.867Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, +] + +[[package]] +name = "einops" +version = "0.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e5/81/df4fbe24dff8ba3934af99044188e20a98ed441ad17a274539b74e82e126/einops-0.8.1.tar.gz", hash = "sha256:de5d960a7a761225532e0f1959e5315ebeafc0cd43394732f103ca44b9837e84", size = 54805, upload-time = "2025-02-09T03:17:00.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/62/9773de14fe6c45c23649e98b83231fffd7b9892b6cf863251dc2afa73643/einops-0.8.1-py3-none-any.whl", hash = "sha256:919387eb55330f5757c6bea9165c5ff5cfe63a642682ea788a6d472576d81737", size = 64359, upload-time = "2025-02-09T03:17:01.998Z" }, +] + +[[package]] +name = "executing" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/28/c14e053b6762b1044f34a13aab6859bbf40456d37d23aa286ac24cfd9a5d/executing-2.2.1.tar.gz", hash = "sha256:3632cc370565f6648cc328b32435bd120a1e4ebb20c77e3fdde9a13cd1e533c4", size = 1129488, upload-time = "2025-09-01T09:48:10.866Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" }, +] + +[[package]] +name = "fabio" +version = "2025.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "h5py" }, + { name = "hdf5plugin" }, + { name = "lxml" }, + { name = "numpy" }, + { name = "pillow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/de/1e20900502b8a04b095fa44affc368ecffbad800dfce34f4e47a4910e2ef/fabio-2025.10.0.tar.gz", hash = "sha256:c19763bcfa02a78a507f3d914b564a8997c88a6aa7b4f87f9db4ce7fa397d256", size = 907037, upload-time = "2025-10-29T15:39:00.195Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/63/6423124a02b0c38c619b48f13bf87bb60b75733c3cbe4ff513fafe51ecba/fabio-2025.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:44aa4b724ac5d3fa861afaf43f634501caaa52063b68a6e023829c563d2d8645", size = 1021057, upload-time = "2025-10-29T15:38:05.032Z" }, + { url = "https://files.pythonhosted.org/packages/b2/a8/9e7dbb35e1e2f0b10a5a8c0cff5f11e229d822f1996b0bf50b6e87a14e9f/fabio-2025.10.0-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:0d492c3d4353d2900c2bcf30744f32331c81071283a1ede8b8c1f775f874e0d4", size = 1079562, upload-time = "2025-10-29T15:38:06.608Z" }, + { url = "https://files.pythonhosted.org/packages/94/54/3590c90b8d7f500d6f8ae17fec703fe11419fdb1139ab23bce91d91014e5/fabio-2025.10.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:602da1ca8df271e13d6a321e753bde05f3dd38acf41b6e5e5d5b075df17cc815", size = 1279948, upload-time = "2025-10-29T15:38:09.287Z" }, + { url = "https://files.pythonhosted.org/packages/90/c5/a07b2822081f8550095e720118ead5b9cde52f75b499e1a4e17c87824066/fabio-2025.10.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:61fb5183f491c6ad3b89ea23bdb1d3488f4093b292753b1485fe809db9d1aedf", size = 1417613, upload-time = "2025-10-29T15:38:10.758Z" }, + { url = "https://files.pythonhosted.org/packages/17/fa/3859496b47674865d5669175a9e4457c8bc5801704da531101cb79570741/fabio-2025.10.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be0811e015fe5fe4e5f9b9b0a42223ab07998cb8ab288f80dc04bc2475f8cee2", size = 1329892, upload-time = "2025-10-29T15:38:12.559Z" }, + { url = "https://files.pythonhosted.org/packages/a8/d0/7a4bd8d6486ce0ce119b6eec4e15f07e36f66b244dad0f10004c8c737735/fabio-2025.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b4f5576da95a8ccb05f42a35a2e8e646d26ae440a66e25c30bc7c568e5b269", size = 1218586, upload-time = "2025-10-29T15:38:14.009Z" }, + { url = "https://files.pythonhosted.org/packages/73/7b/0cf465181832d2169572c884a9803f913f964d2e09336cf12221f3cf6f57/fabio-2025.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17ed28a5938836dcee78a905952f6b9d4cde9020674fe416ac5ba0bb7d80942a", size = 1292785, upload-time = "2025-10-29T15:38:15.506Z" }, + { url = "https://files.pythonhosted.org/packages/de/02/fa52f926cd2bb56bc25291be4b3c66daa13163cd7fa04c304e8fd863798d/fabio-2025.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:0ebd2ca5337bf45215afa1509b53c047055086098ef6a314190bd5088c62b37e", size = 1335509, upload-time = "2025-10-29T15:38:17.196Z" }, +] + +[[package]] +name = 
"fastjsonschema" +version = "2.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/b5/23b216d9d985a956623b6bd12d4086b60f0059b27799f23016af04a74ea1/fastjsonschema-2.21.2.tar.gz", hash = "sha256:b1eb43748041c880796cd077f1a07c3d94e93ae84bba5ed36800a33554ae05de", size = 374130, upload-time = "2025-08-14T18:49:36.666Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/a8/20d0723294217e47de6d9e2e40fd4a9d2f7c4b6ef974babd482a59743694/fastjsonschema-2.21.2-py3-none-any.whl", hash = "sha256:1c797122d0a86c5cace2e54bf4e819c36223b552017172f32c5c024a6b77e463", size = 24024, upload-time = "2025-08-14T18:49:34.776Z" }, +] + +[[package]] +name = "filelock" +version = "3.20.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c1/e0/a75dbe4bca1e7d41307323dad5ea2efdd95408f74ab2de8bd7dba9b51a1a/filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64", size = 19510, upload-time = "2026-01-02T15:33:32.582Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/30/ab407e2ec752aa541704ed8f93c11e2a5d92c168b8a755d818b74a3c5c2d/filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8", size = 16697, upload-time = "2026-01-02T15:33:31.133Z" }, +] + +[[package]] +name = "flask" +version = "3.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "blinker" }, + { name = "click" }, + { name = "itsdangerous" }, + { name = "jinja2" }, + { name = "markupsafe" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/6d/cfe3c0fcc5e477df242b98bfe186a4c34357b4847e87ecaef04507332dab/flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87", size = 720160, upload-time = "2025-08-19T21:03:21.205Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/f9/7f9263c5695f4bd0023734af91bedb2ff8209e8de6ead162f35d8dc762fd/flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c", size = 103308, upload-time = "2025-08-19T21:03:19.499Z" }, +] + +[[package]] +name = "flatbuffers" +version = "25.12.19" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e8/2d/d2a548598be01649e2d46231d151a6c56d10b964d94043a335ae56ea2d92/flatbuffers-25.12.19-py2.py3-none-any.whl", hash = "sha256:7634f50c427838bb021c2d66a3d1168e9d199b0607e6329399f04846d42e20b4", size = 26661, upload-time = "2025-12-19T23:16:13.622Z" }, +] + +[[package]] +name = "fonttools" +version = "4.60.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4b/42/97a13e47a1e51a5a7142475bbcf5107fe3a68fc34aef331c897d5fb98ad0/fonttools-4.60.1.tar.gz", hash = "sha256:ef00af0439ebfee806b25f24c8f92109157ff3fac5731dc7867957812e87b8d9", size = 3559823, upload-time = "2025-09-29T21:13:27.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/85/639aa9bface1537e0fb0f643690672dde0695a5bbbc90736bc571b0b1941/fonttools-4.60.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7b4c32e232a71f63a5d00259ca3d88345ce2a43295bb049d21061f338124246f", size = 2831872, upload-time = "2025-09-29T21:11:20.329Z" }, + { url = 
"https://files.pythonhosted.org/packages/6b/47/3c63158459c95093be9618794acb1067b3f4d30dcc5c3e8114b70e67a092/fonttools-4.60.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3630e86c484263eaac71d117085d509cbcf7b18f677906824e4bace598fb70d2", size = 2356990, upload-time = "2025-09-29T21:11:22.754Z" }, + { url = "https://files.pythonhosted.org/packages/94/dd/1934b537c86fcf99f9761823f1fc37a98fbd54568e8e613f29a90fed95a9/fonttools-4.60.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5c1015318e4fec75dd4943ad5f6a206d9727adf97410d58b7e32ab644a807914", size = 5042189, upload-time = "2025-09-29T21:11:25.061Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d2/9f4e4c4374dd1daa8367784e1bd910f18ba886db1d6b825b12edf6db3edc/fonttools-4.60.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e6c58beb17380f7c2ea181ea11e7db8c0ceb474c9dd45f48e71e2cb577d146a1", size = 4978683, upload-time = "2025-09-29T21:11:27.693Z" }, + { url = "https://files.pythonhosted.org/packages/cc/c4/0fb2dfd1ecbe9a07954cc13414713ed1eab17b1c0214ef07fc93df234a47/fonttools-4.60.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec3681a0cb34c255d76dd9d865a55f260164adb9fa02628415cdc2d43ee2c05d", size = 5021372, upload-time = "2025-09-29T21:11:30.257Z" }, + { url = "https://files.pythonhosted.org/packages/0c/d5/495fc7ae2fab20223cc87179a8f50f40f9a6f821f271ba8301ae12bb580f/fonttools-4.60.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f4b5c37a5f40e4d733d3bbaaef082149bee5a5ea3156a785ff64d949bd1353fa", size = 5132562, upload-time = "2025-09-29T21:11:32.737Z" }, + { url = "https://files.pythonhosted.org/packages/bc/fa/021dab618526323c744e0206b3f5c8596a2e7ae9aa38db5948a131123e83/fonttools-4.60.1-cp311-cp311-win32.whl", hash = "sha256:398447f3d8c0c786cbf1209711e79080a40761eb44b27cdafffb48f52bcec258", size = 2230288, upload-time = "2025-09-29T21:11:35.015Z" }, + { url = "https://files.pythonhosted.org/packages/bb/78/0e1a6d22b427579ea5c8273e1c07def2f325b977faaf60bb7ddc01456cb1/fonttools-4.60.1-cp311-cp311-win_amd64.whl", hash = "sha256:d066ea419f719ed87bc2c99a4a4bfd77c2e5949cb724588b9dd58f3fd90b92bf", size = 2278184, upload-time = "2025-09-29T21:11:37.434Z" }, + { url = "https://files.pythonhosted.org/packages/c7/93/0dd45cd283c32dea1545151d8c3637b4b8c53cdb3a625aeb2885b184d74d/fonttools-4.60.1-py3-none-any.whl", hash = "sha256:906306ac7afe2156fcf0042173d6ebbb05416af70f6b370967b47f8f00103bbb", size = 1143175, upload-time = "2025-09-29T21:13:24.134Z" }, +] + +[[package]] +name = "fqdn" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/3e/a80a8c077fd798951169626cde3e239adeba7dab75deb3555716415bd9b0/fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f", size = 6015, upload-time = "2021-03-11T07:16:29.08Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/58/8acf1b3e91c58313ce5cb67df61001fc9dcd21be4fadb76c1a2d540e09ed/fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014", size = 9121, upload-time = "2021-03-11T07:16:28.351Z" }, +] + +[[package]] +name = "frozenlist" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/f5/c831fac6cc817d26fd54c7eaccd04ef7e0288806943f7cc5bbf69f3ac1f0/frozenlist-1.8.0.tar.gz", hash = 
"sha256:3ede829ed8d842f6cd48fc7081d7a41001a56f1f38603f9d49bf3020d59a31ad", size = 45875, upload-time = "2025-10-06T05:38:17.865Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/03/077f869d540370db12165c0aa51640a873fb661d8b315d1d4d67b284d7ac/frozenlist-1.8.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:09474e9831bc2b2199fad6da3c14c7b0fbdd377cce9d3d77131be28906cb7d84", size = 86912, upload-time = "2025-10-06T05:35:45.98Z" }, + { url = "https://files.pythonhosted.org/packages/df/b5/7610b6bd13e4ae77b96ba85abea1c8cb249683217ef09ac9e0ae93f25a91/frozenlist-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:17c883ab0ab67200b5f964d2b9ed6b00971917d5d8a92df149dc2c9779208ee9", size = 50046, upload-time = "2025-10-06T05:35:47.009Z" }, + { url = "https://files.pythonhosted.org/packages/6e/ef/0e8f1fe32f8a53dd26bdd1f9347efe0778b0fddf62789ea683f4cc7d787d/frozenlist-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fa47e444b8ba08fffd1c18e8cdb9a75db1b6a27f17507522834ad13ed5922b93", size = 50119, upload-time = "2025-10-06T05:35:48.38Z" }, + { url = "https://files.pythonhosted.org/packages/11/b1/71a477adc7c36e5fb628245dfbdea2166feae310757dea848d02bd0689fd/frozenlist-1.8.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2552f44204b744fba866e573be4c1f9048d6a324dfe14475103fd51613eb1d1f", size = 231067, upload-time = "2025-10-06T05:35:49.97Z" }, + { url = "https://files.pythonhosted.org/packages/45/7e/afe40eca3a2dc19b9904c0f5d7edfe82b5304cb831391edec0ac04af94c2/frozenlist-1.8.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:957e7c38f250991e48a9a73e6423db1bb9dd14e722a10f6b8bb8e16a0f55f695", size = 233160, upload-time = "2025-10-06T05:35:51.729Z" }, + { url = "https://files.pythonhosted.org/packages/a6/aa/7416eac95603ce428679d273255ffc7c998d4132cfae200103f164b108aa/frozenlist-1.8.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8585e3bb2cdea02fc88ffa245069c36555557ad3609e83be0ec71f54fd4abb52", size = 228544, upload-time = "2025-10-06T05:35:53.246Z" }, + { url = "https://files.pythonhosted.org/packages/8b/3d/2a2d1f683d55ac7e3875e4263d28410063e738384d3adc294f5ff3d7105e/frozenlist-1.8.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:edee74874ce20a373d62dc28b0b18b93f645633c2943fd90ee9d898550770581", size = 243797, upload-time = "2025-10-06T05:35:54.497Z" }, + { url = "https://files.pythonhosted.org/packages/78/1e/2d5565b589e580c296d3bb54da08d206e797d941a83a6fdea42af23be79c/frozenlist-1.8.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c9a63152fe95756b85f31186bddf42e4c02c6321207fd6601a1c89ebac4fe567", size = 247923, upload-time = "2025-10-06T05:35:55.861Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c3/65872fcf1d326a7f101ad4d86285c403c87be7d832b7470b77f6d2ed5ddc/frozenlist-1.8.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b6db2185db9be0a04fecf2f241c70b63b1a242e2805be291855078f2b404dd6b", size = 230886, upload-time = "2025-10-06T05:35:57.399Z" }, + { url = "https://files.pythonhosted.org/packages/a0/76/ac9ced601d62f6956f03cc794f9e04c81719509f85255abf96e2510f4265/frozenlist-1.8.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f4be2e3d8bc8aabd566f8d5b8ba7ecc09249d74ba3c9ed52e54dc23a293f0b92", size = 245731, upload-time = "2025-10-06T05:35:58.563Z" }, + { url = 
"https://files.pythonhosted.org/packages/b9/49/ecccb5f2598daf0b4a1415497eba4c33c1e8ce07495eb07d2860c731b8d5/frozenlist-1.8.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c8d1634419f39ea6f5c427ea2f90ca85126b54b50837f31497f3bf38266e853d", size = 241544, upload-time = "2025-10-06T05:35:59.719Z" }, + { url = "https://files.pythonhosted.org/packages/53/4b/ddf24113323c0bbcc54cb38c8b8916f1da7165e07b8e24a717b4a12cbf10/frozenlist-1.8.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1a7fa382a4a223773ed64242dbe1c9c326ec09457e6b8428efb4118c685c3dfd", size = 241806, upload-time = "2025-10-06T05:36:00.959Z" }, + { url = "https://files.pythonhosted.org/packages/a7/fb/9b9a084d73c67175484ba2789a59f8eebebd0827d186a8102005ce41e1ba/frozenlist-1.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:11847b53d722050808926e785df837353bd4d75f1d494377e59b23594d834967", size = 229382, upload-time = "2025-10-06T05:36:02.22Z" }, + { url = "https://files.pythonhosted.org/packages/95/a3/c8fb25aac55bf5e12dae5c5aa6a98f85d436c1dc658f21c3ac73f9fa95e5/frozenlist-1.8.0-cp311-cp311-win32.whl", hash = "sha256:27c6e8077956cf73eadd514be8fb04d77fc946a7fe9f7fe167648b0b9085cc25", size = 39647, upload-time = "2025-10-06T05:36:03.409Z" }, + { url = "https://files.pythonhosted.org/packages/0a/f5/603d0d6a02cfd4c8f2a095a54672b3cf967ad688a60fb9faf04fc4887f65/frozenlist-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac913f8403b36a2c8610bbfd25b8013488533e71e62b4b4adce9c86c8cea905b", size = 44064, upload-time = "2025-10-06T05:36:04.368Z" }, + { url = "https://files.pythonhosted.org/packages/5d/16/c2c9ab44e181f043a86f9a8f84d5124b62dbcb3a02c0977ec72b9ac1d3e0/frozenlist-1.8.0-cp311-cp311-win_arm64.whl", hash = "sha256:d4d3214a0f8394edfa3e303136d0575eece0745ff2b47bd2cb2e66dd92d4351a", size = 39937, upload-time = "2025-10-06T05:36:05.669Z" }, + { url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" }, +] + +[[package]] +name = "fsspec" +version = "2025.12.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b6/27/954057b0d1f53f086f681755207dda6de6c660ce133c829158e8e8fe7895/fsspec-2025.12.0.tar.gz", hash = "sha256:c505de011584597b1060ff778bb664c1bc022e87921b0e4f10cc9c44f9635973", size = 309748, upload-time = "2025-12-03T15:23:42.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/c7/b64cae5dba3a1b138d7123ec36bb5ccd39d39939f18454407e5468f4763f/fsspec-2025.12.0-py3-none-any.whl", hash = "sha256:8bf1fe301b7d8acfa6e8571e3b1c3d158f909666642431cc78a1b7b4dbc5ec5b", size = 201422, upload-time = "2025-12-03T15:23:41.434Z" }, +] + +[[package]] +name = "greenlet" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/03/b8/704d753a5a45507a7aab61f18db9509302ed3d0a27ac7e0359ec2905b1a6/greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d", size = 188260, upload-time = "2025-08-07T13:24:33.51Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/de/f28ced0a67749cac23fecb02b694f6473f47686dff6afaa211d186e2ef9c/greenlet-3.2.4-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:96378df1de302bc38e99c3a9aa311967b7dc80ced1dcc6f171e99842987882a2", size = 272305, upload-time = 
"2025-08-07T13:15:41.288Z" }, + { url = "https://files.pythonhosted.org/packages/09/16/2c3792cba130000bf2a31c5272999113f4764fd9d874fb257ff588ac779a/greenlet-3.2.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1ee8fae0519a337f2329cb78bd7a8e128ec0f881073d43f023c7b8d4831d5246", size = 632472, upload-time = "2025-08-07T13:42:55.044Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8f/95d48d7e3d433e6dae5b1682e4292242a53f22df82e6d3dda81b1701a960/greenlet-3.2.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:94abf90142c2a18151632371140b3dba4dee031633fe614cb592dbb6c9e17bc3", size = 644646, upload-time = "2025-08-07T13:45:26.523Z" }, + { url = "https://files.pythonhosted.org/packages/d5/5e/405965351aef8c76b8ef7ad370e5da58d57ef6068df197548b015464001a/greenlet-3.2.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:4d1378601b85e2e5171b99be8d2dc85f594c79967599328f95c1dc1a40f1c633", size = 640519, upload-time = "2025-08-07T13:53:13.928Z" }, + { url = "https://files.pythonhosted.org/packages/25/5d/382753b52006ce0218297ec1b628e048c4e64b155379331f25a7316eb749/greenlet-3.2.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0db5594dce18db94f7d1650d7489909b57afde4c580806b8d9203b6e79cdc079", size = 639707, upload-time = "2025-08-07T13:18:27.146Z" }, + { url = "https://files.pythonhosted.org/packages/1f/8e/abdd3f14d735b2929290a018ecf133c901be4874b858dd1c604b9319f064/greenlet-3.2.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2523e5246274f54fdadbce8494458a2ebdcdbc7b802318466ac5606d3cded1f8", size = 587684, upload-time = "2025-08-07T13:18:25.164Z" }, + { url = "https://files.pythonhosted.org/packages/5d/65/deb2a69c3e5996439b0176f6651e0052542bb6c8f8ec2e3fba97c9768805/greenlet-3.2.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1987de92fec508535687fb807a5cea1560f6196285a4cde35c100b8cd632cc52", size = 1116647, upload-time = "2025-08-07T13:42:38.655Z" }, + { url = "https://files.pythonhosted.org/packages/3f/cc/b07000438a29ac5cfb2194bfc128151d52f333cee74dd7dfe3fb733fc16c/greenlet-3.2.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:55e9c5affaa6775e2c6b67659f3a71684de4c549b3dd9afca3bc773533d284fa", size = 1142073, upload-time = "2025-08-07T13:18:21.737Z" }, + { url = "https://files.pythonhosted.org/packages/67/24/28a5b2fa42d12b3d7e5614145f0bd89714c34c08be6aabe39c14dd52db34/greenlet-3.2.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9c6de1940a7d828635fbd254d69db79e54619f165ee7ce32fda763a9cb6a58c", size = 1548385, upload-time = "2025-11-04T12:42:11.067Z" }, + { url = "https://files.pythonhosted.org/packages/6a/05/03f2f0bdd0b0ff9a4f7b99333d57b53a7709c27723ec8123056b084e69cd/greenlet-3.2.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03c5136e7be905045160b1b9fdca93dd6727b180feeafda6818e6496434ed8c5", size = 1613329, upload-time = "2025-11-04T12:42:12.928Z" }, + { url = "https://files.pythonhosted.org/packages/d8/0f/30aef242fcab550b0b3520b8e3561156857c94288f0332a79928c31a52cf/greenlet-3.2.4-cp311-cp311-win_amd64.whl", hash = "sha256:9c40adce87eaa9ddb593ccb0fa6a07caf34015a29bf8d344811665b573138db9", size = 299100, upload-time = "2025-08-07T13:44:12.287Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = 
"sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "h5py" +version = "3.15.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4d/6a/0d79de0b025aa85dc8864de8e97659c94cf3d23148394a954dc5ca52f8c8/h5py-3.15.1.tar.gz", hash = "sha256:c86e3ed45c4473564de55aa83b6fc9e5ead86578773dfbd93047380042e26b69", size = 426236, upload-time = "2025-10-16T10:35:27.404Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/fd/8349b48b15b47768042cff06ad6e1c229f0a4bd89225bf6b6894fea27e6d/h5py-3.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5aaa330bcbf2830150c50897ea5dcbed30b5b6d56897289846ac5b9e529ec243", size = 3434135, upload-time = "2025-10-16T10:33:47.954Z" }, + { url = "https://files.pythonhosted.org/packages/c1/b0/1c628e26a0b95858f54aba17e1599e7f6cd241727596cc2580b72cb0a9bf/h5py-3.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c970fb80001fffabb0109eaf95116c8e7c0d3ca2de854e0901e8a04c1f098509", size = 2870958, upload-time = "2025-10-16T10:33:50.907Z" }, + { url = "https://files.pythonhosted.org/packages/f9/e3/c255cafc9b85e6ea04e2ad1bba1416baa1d7f57fc98a214be1144087690c/h5py-3.15.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:80e5bb5b9508d5d9da09f81fd00abbb3f85da8143e56b1585d59bc8ceb1dba8b", size = 4504770, upload-time = "2025-10-16T10:33:54.357Z" }, + { url = "https://files.pythonhosted.org/packages/8b/23/4ab1108e87851ccc69694b03b817d92e142966a6c4abd99e17db77f2c066/h5py-3.15.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5b849ba619a066196169763c33f9f0f02e381156d61c03e000bb0100f9950faf", size = 4700329, upload-time = "2025-10-16T10:33:57.616Z" }, + { url = "https://files.pythonhosted.org/packages/a4/e4/932a3a8516e4e475b90969bf250b1924dbe3612a02b897e426613aed68f4/h5py-3.15.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e7f6c841efd4e6e5b7e82222eaf90819927b6d256ab0f3aca29675601f654f3c", size = 4152456, upload-time = "2025-10-16T10:34:00.843Z" }, + { url = "https://files.pythonhosted.org/packages/2a/0a/f74d589883b13737021b2049ac796328f188dbb60c2ed35b101f5b95a3fc/h5py-3.15.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ca8a3a22458956ee7b40d8e39c9a9dc01f82933e4c030c964f8b875592f4d831", size = 4617295, upload-time = "2025-10-16T10:34:04.154Z" }, + { url = "https://files.pythonhosted.org/packages/23/95/499b4e56452ef8b6c95a271af0dde08dac4ddb70515a75f346d4f400579b/h5py-3.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:550e51131376889656feec4aff2170efc054a7fe79eb1da3bb92e1625d1ac878", size = 2882129, upload-time = "2025-10-16T10:34:06.886Z" }, + { url = "https://files.pythonhosted.org/packages/ce/bb/cfcc70b8a42222ba3ad4478bcef1791181ea908e2adbd7d53c66395edad5/h5py-3.15.1-cp311-cp311-win_arm64.whl", hash = "sha256:b39239947cb36a819147fc19e86b618dcb0953d1cd969f5ed71fc0de60392427", size = 2477121, upload-time = "2025-10-16T10:34:09.579Z" }, +] + +[[package]] +name = "hdf5plugin" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "h5py" }, +] +sdist = { url 
= "https://files.pythonhosted.org/packages/a1/4f/9130151e3aa475b3e4e9a611bf608107fe5c72d277d74c4cf36f164b7c81/hdf5plugin-6.0.0.tar.gz", hash = "sha256:847ed9e96b451367a110f0ba64a3b260d38d64bbf3f25751858d3b56e094cfe0", size = 66372085, upload-time = "2025-10-08T18:16:28.423Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/13/15017f6210bfea843316d62f0f121e364e17bb129444ed803a256a213036/hdf5plugin-6.0.0-py3-none-macosx_10_13_universal2.whl", hash = "sha256:a59fbd5d4290a8a5334d82ccb4c6b9bfc7aaf586de7fedb88762e8601bc05fd4", size = 13339413, upload-time = "2025-10-08T18:16:10.656Z" }, + { url = "https://files.pythonhosted.org/packages/40/bf/d1f3765fb879820d7331e30e860b684f5b78d3ec17324e8f54130cbe560b/hdf5plugin-6.0.0-py3-none-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d301f4b9295872bacf277c70628d4c5e965ee47db762d8fde2d4849f201b9897", size = 42858563, upload-time = "2025-10-08T18:16:14.106Z" }, + { url = "https://files.pythonhosted.org/packages/0a/67/37d0b84fbbf26bf0d6a99a8f98bcd82bb6d437dc8cabee259fb3d7506ec7/hdf5plugin-6.0.0-py3-none-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:78b082ea355fe46bf5b396024de1fb662a1aaf9a5e11861ad61a5a2a6316d59d", size = 45126124, upload-time = "2025-10-08T18:16:17.992Z" }, + { url = "https://files.pythonhosted.org/packages/ed/2f/1046d464ad1db29a4f6c70ba4e19b39baa8a6542c719eaa4e765108f07f1/hdf5plugin-6.0.0-py3-none-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:79e0524d18ddc41c0cf2e1bb2e529d4e154c286f6a1bd85f3d44019d2a17574a", size = 44857273, upload-time = "2025-10-08T18:16:22.007Z" }, + { url = "https://files.pythonhosted.org/packages/61/b3/75478bdfee85533777de4204373f563aa7a1074355300743c3aedc33cac5/hdf5plugin-6.0.0-py3-none-win_amd64.whl", hash = "sha256:99866f90be1ceac5519e6e038669564be326c233618d59ba1f38c9dd8c32099e", size = 3379316, upload-time = "2025-10-08T18:16:25.007Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "humanfriendly" +version = "10.0" 
+source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyreadline3", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cc/3f/2c29224acb2e2df4d2046e4c73ee2662023c58ff5b113c4c1adac0886c43/humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc", size = 360702, upload-time = "2021-09-17T21:40:43.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/0f/310fb31e39e2d734ccaa2c0fb981ee41f7bd5056ce9bc29b2248bd569169/humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477", size = 86794, upload-time = "2021-09-17T21:40:39.897Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "imageio" +version = "2.37.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pillow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/6f/606be632e37bf8d05b253e8626c2291d74c691ddc7bcdf7d6aaf33b32f6a/imageio-2.37.2.tar.gz", hash = "sha256:0212ef2727ac9caa5ca4b2c75ae89454312f440a756fcfc8ef1993e718f50f8a", size = 389600, upload-time = "2025-11-04T14:29:39.898Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/fe/301e0936b79bcab4cacc7548bf2853fc28dced0a578bab1f7ef53c9aa75b/imageio-2.37.2-py3-none-any.whl", hash = "sha256:ad9adfb20335d718c03de457358ed69f141021a333c40a53e57273d8a5bd0b9b", size = 317646, upload-time = "2025-11-04T14:29:37.948Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, +] + +[[package]] +name = "ipykernel" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "appnope", marker = "sys_platform == 'darwin'" }, + { name = "comm" }, + { name = "debugpy" }, + { name = "ipython" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "matplotlib-inline" }, + { name = "nest-asyncio" }, + { name = "packaging" }, + { name = "psutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/b9/a4/4948be6eb88628505b83a1f2f40d90254cab66abf2043b3c40fa07dfce0f/ipykernel-7.1.0.tar.gz", hash = "sha256:58a3fc88533d5930c3546dc7eac66c6d288acde4f801e2001e65edc5dc9cf0db", size = 174579, upload-time = "2025-10-27T09:46:39.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/17/20c2552266728ceba271967b87919664ecc0e33efca29c3efc6baf88c5f9/ipykernel-7.1.0-py3-none-any.whl", hash = "sha256:763b5ec6c5b7776f6a8d7ce09b267693b4e5ce75cb50ae696aaefb3c85e1ea4c", size = 117968, upload-time = "2025-10-27T09:46:37.805Z" }, +] + +[[package]] +name = "ipympl" +version = "0.9.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ipython" }, + { name = "ipywidgets" }, + { name = "matplotlib" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c4/8c/f9e60abf409cef8234e66e69ce3fe263f1236b285f9105ea125e4660b77a/ipympl-0.9.8.tar.gz", hash = "sha256:6d7230d518384521093f3854f7db89d069dcd9c28a935b371e9c9f126354dee1", size = 58483988, upload-time = "2025-10-09T14:20:07.741Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/6e/9148bfed8ca535e4c61ce7843327c76ec7c63c40e33848ec03aa844a26af/ipympl-0.9.8-py3-none-any.whl", hash = "sha256:4a03612f77d92c9e2160c9e0d2a80b277e30387126399088f780dba9622247be", size = 515832, upload-time = "2025-10-09T14:20:05.39Z" }, +] + +[[package]] +name = "ipython" +version = "9.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "decorator" }, + { name = "ipython-pygments-lexers" }, + { name = "jedi" }, + { name = "matplotlib-inline" }, + { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "stack-data" }, + { name = "traitlets" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/29/e6/48c74d54039241a456add616464ea28c6ebf782e4110d419411b83dae06f/ipython-9.7.0.tar.gz", hash = "sha256:5f6de88c905a566c6a9d6c400a8fed54a638e1f7543d17aae2551133216b1e4e", size = 4422115, upload-time = "2025-11-05T12:18:54.646Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/aa/62893d6a591d337aa59dcc4c6f6c842f1fe20cd72c8c5c1f980255243252/ipython-9.7.0-py3-none-any.whl", hash = "sha256:bce8ac85eb9521adc94e1845b4c03d88365fd6ac2f4908ec4ed1eb1b0a065f9f", size = 618911, upload-time = "2025-11-05T12:18:52.484Z" }, +] + +[[package]] +name = "ipython-pygments-lexers" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393, upload-time = "2025-01-17T11:24:34.505Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" }, +] + +[[package]] +name = "ipywidgets" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "comm" }, + { name = "ipython" }, + { name = 
"jupyterlab-widgets" }, + { name = "traitlets" }, + { name = "widgetsnbextension" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4c/ae/c5ce1edc1afe042eadb445e95b0671b03cee61895264357956e61c0d2ac0/ipywidgets-8.1.8.tar.gz", hash = "sha256:61f969306b95f85fba6b6986b7fe45d73124d1d9e3023a8068710d47a22ea668", size = 116739, upload-time = "2025-11-01T21:18:12.393Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/6d/0d9848617b9f753b87f214f1c682592f7ca42de085f564352f10f0843026/ipywidgets-8.1.8-py3-none-any.whl", hash = "sha256:ecaca67aed704a338f88f67b1181b58f821ab5dc89c1f0f5ef99db43c1c2921e", size = 139808, upload-time = "2025-11-01T21:18:10.956Z" }, +] + +[[package]] +name = "isoduration" +version = "20.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "arrow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/1a/3c8edc664e06e6bd06cce40c6b22da5f1429aa4224d0c590f3be21c91ead/isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9", size = 11649, upload-time = "2020-11-01T11:00:00.312Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/55/e5326141505c5d5e34c5e0935d2908a74e4561eca44108fbfb9c13d2911a/isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042", size = 11321, upload-time = "2020-11-01T10:59:58.02Z" }, +] + +[[package]] +name = "itsdangerous" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9c/cb/8ac0172223afbccb63986cc25049b154ecfb5e85932587206f42317be31d/itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173", size = 54410, upload-time = "2024-04-16T21:28:15.614Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/96/92447566d16df59b2a776c0fb82dbc4d9e07cd95062562af01e408583fc4/itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef", size = 16234, upload-time = "2024-04-16T21:28:14.499Z" }, +] + +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = 
"sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "joblib" +version = "1.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/5d/447af5ea094b9e4c4054f82e223ada074c552335b9b4b2d14bd9b35a67c4/joblib-1.5.2.tar.gz", hash = "sha256:3faa5c39054b2f03ca547da9b2f52fde67c06240c31853f306aea97f13647b55", size = 331077, upload-time = "2025-08-27T12:15:46.575Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/e8/685f47e0d754320684db4425a0967f7d3fa70126bffd76110b7009a0090f/joblib-1.5.2-py3-none-any.whl", hash = "sha256:4e1f0bdbb987e6d843c70cf43714cb276623def372df3c22fe5266b2670bc241", size = 308396, upload-time = "2025-08-27T12:15:45.188Z" }, +] + +[[package]] +name = "json5" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/ae/929aee9619e9eba9015207a9d2c1c54db18311da7eb4dcf6d41ad6f0eb67/json5-0.12.1.tar.gz", hash = "sha256:b2743e77b3242f8d03c143dd975a6ec7c52e2f2afe76ed934e53503dd4ad4990", size = 52191, upload-time = "2025-08-12T19:47:42.583Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/e2/05328bd2621be49a6fed9e3030b1e51a2d04537d3f816d211b9cc53c5262/json5-0.12.1-py3-none-any.whl", hash = "sha256:d9c9b3bc34a5f54d43c35e11ef7cb87d8bdd098c6ace87117a7b7e83e705c1d5", size = 36119, upload-time = "2025-08-12T19:47:41.131Z" }, +] + +[[package]] +name = "jsonpointer" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/0a/eebeb1fa92507ea94016a2a790b93c2ae41a7e18778f85471dc54475ed25/jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef", size = 9114, upload-time = "2024-06-10T19:24:42.462Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[package.optional-dependencies] +format-nongpl = [ + { name = "fqdn" }, + { name = "idna" }, + { name = "isoduration" }, + { name = "jsonpointer" }, + { name = "rfc3339-validator" }, + { name = "rfc3986-validator" }, + { name = "rfc3987-syntax" }, + { name = "uri-template" }, + { name = "webcolors" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = 
"referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "jupyter" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ipykernel" }, + { name = "ipywidgets" }, + { name = "jupyter-console" }, + { name = "jupyterlab" }, + { name = "nbconvert" }, + { name = "notebook" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/f3/af28ea964ab8bc1e472dba2e82627d36d470c51f5cd38c37502eeffaa25e/jupyter-1.1.1.tar.gz", hash = "sha256:d55467bceabdea49d7e3624af7e33d59c37fff53ed3a350e1ac957bed731de7a", size = 5714959, upload-time = "2024-08-30T07:15:48.299Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/64/285f20a31679bf547b75602702f7800e74dbabae36ef324f716c02804753/jupyter-1.1.1-py2.py3-none-any.whl", hash = "sha256:7a59533c22af65439b24bbe60373a4e95af8f16ac65a6c00820ad378e3f7cc83", size = 2657, upload-time = "2024-08-30T07:15:47.045Z" }, +] + +[[package]] +name = "jupyter-client" +version = "8.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-core" }, + { name = "python-dateutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/22/bf9f12fdaeae18019a468b68952a60fe6dbab5d67cd2a103cac7659b41ca/jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419", size = 342019, upload-time = "2024-09-17T10:44:17.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/85/b0394e0b6fcccd2c1eeefc230978a6f8cb0c5df1e4cd3e7625735a0d7d1e/jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f", size = 106105, upload-time = "2024-09-17T10:44:15.218Z" }, +] + +[[package]] +name = "jupyter-console" +version = "6.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ipykernel" }, + { name = "ipython" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "pyzmq" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bd/2d/e2fd31e2fc41c14e2bcb6c976ab732597e907523f6b2420305f9fc7fdbdb/jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539", size = 34363, upload-time = "2023-03-06T14:13:31.02Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/77/71d78d58f15c22db16328a476426f7ac4a60d3a5a7ba3b9627ee2f7903d4/jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485", size = 24510, upload-time = "2023-03-06T14:13:28.229Z" }, +] + +[[package]] +name = "jupyter-core" +version = "5.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "platformdirs" }, + { name = "traitlets" }, +] 
+sdist = { url = "https://files.pythonhosted.org/packages/02/49/9d1284d0dc65e2c757b74c6687b6d319b02f822ad039e5c512df9194d9dd/jupyter_core-5.9.1.tar.gz", hash = "sha256:4d09aaff303b9566c3ce657f580bd089ff5c91f5f89cf7d8846c3cdf465b5508", size = 89814, upload-time = "2025-10-16T19:19:18.444Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/e7/80988e32bf6f73919a113473a604f5a8f09094de312b9d52b79c2df7612b/jupyter_core-5.9.1-py3-none-any.whl", hash = "sha256:ebf87fdc6073d142e114c72c9e29a9d7ca03fad818c5d300ce2adc1fb0743407", size = 29032, upload-time = "2025-10-16T19:19:16.783Z" }, +] + +[[package]] +name = "jupyter-events" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonschema", extra = ["format-nongpl"] }, + { name = "packaging" }, + { name = "python-json-logger" }, + { name = "pyyaml" }, + { name = "referencing" }, + { name = "rfc3339-validator" }, + { name = "rfc3986-validator" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9d/c3/306d090461e4cf3cd91eceaff84bede12a8e52cd821c2d20c9a4fd728385/jupyter_events-0.12.0.tar.gz", hash = "sha256:fc3fce98865f6784c9cd0a56a20644fc6098f21c8c33834a8d9fe383c17e554b", size = 62196, upload-time = "2025-02-03T17:23:41.485Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/48/577993f1f99c552f18a0428731a755e06171f9902fa118c379eb7c04ea22/jupyter_events-0.12.0-py3-none-any.whl", hash = "sha256:6464b2fa5ad10451c3d35fabc75eab39556ae1e2853ad0c0cc31b656731a97fb", size = 19430, upload-time = "2025-02-03T17:23:38.643Z" }, +] + +[[package]] +name = "jupyter-lsp" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-server" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/5a/9066c9f8e94ee517133cd98dba393459a16cd48bba71a82f16a65415206c/jupyter_lsp-2.3.0.tar.gz", hash = "sha256:458aa59339dc868fb784d73364f17dbce8836e906cd75fd471a325cba02e0245", size = 54823, upload-time = "2025-08-27T17:47:34.671Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/60/1f6cee0c46263de1173894f0fafcb3475ded276c472c14d25e0280c18d6d/jupyter_lsp-2.3.0-py3-none-any.whl", hash = "sha256:e914a3cb2addf48b1c7710914771aaf1819d46b2e5a79b0f917b5478ec93f34f", size = 76687, upload-time = "2025-08-27T17:47:33.15Z" }, +] + +[[package]] +name = "jupyter-server" +version = "2.17.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "argon2-cffi" }, + { name = "jinja2" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "jupyter-events" }, + { name = "jupyter-server-terminals" }, + { name = "nbconvert" }, + { name = "nbformat" }, + { name = "overrides" }, + { name = "packaging" }, + { name = "prometheus-client" }, + { name = "pywinpty", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "pyzmq" }, + { name = "send2trash" }, + { name = "terminado" }, + { name = "tornado" }, + { name = "traitlets" }, + { name = "websocket-client" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/ac/e040ec363d7b6b1f11304cc9f209dac4517ece5d5e01821366b924a64a50/jupyter_server-2.17.0.tar.gz", hash = "sha256:c38ea898566964c888b4772ae1ed58eca84592e88251d2cfc4d171f81f7e99d5", size = 731949, upload-time = "2025-08-21T14:42:54.042Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/92/80/a24767e6ca280f5a49525d987bf3e4d7552bf67c8be07e8ccf20271f8568/jupyter_server-2.17.0-py3-none-any.whl", hash = "sha256:e8cb9c7db4251f51ed307e329b81b72ccf2056ff82d50524debde1ee1870e13f", size = 388221, upload-time = "2025-08-21T14:42:52.034Z" }, +] + +[[package]] +name = "jupyter-server-proxy" +version = "4.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "jupyter-server" }, + { name = "simpervisor" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2a/c8/ff6ecdbf55ac1a9253d53d87c60004ee508cfc852662b15fb33cd6ebfdbc/jupyter_server_proxy-4.4.0.tar.gz", hash = "sha256:e5732eb9c810c0caa997f90a2f15f7d09af638e7eea9c67eb5c43e9c1f0e1157", size = 136830, upload-time = "2024-08-29T12:49:22.906Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/c6/e4a1d9fdd22d40962befd82780a98b20b67ef9cafe87246e4955f44b8f09/jupyter_server_proxy-4.4.0-py3-none-any.whl", hash = "sha256:707b5c84810bb8863d50f6c6d50a386fec216149e11802b7d4c451b54a63a9a6", size = 37567, upload-time = "2024-08-29T12:49:20.89Z" }, +] + +[[package]] +name = "jupyter-server-terminals" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywinpty", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "terminado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/d5/562469734f476159e99a55426d697cbf8e7eb5efe89fb0e0b4f83a3d3459/jupyter_server_terminals-0.5.3.tar.gz", hash = "sha256:5ae0295167220e9ace0edcfdb212afd2b01ee8d179fe6f23c899590e9b8a5269", size = 31430, upload-time = "2024-03-12T14:37:03.049Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/2d/2b32cdbe8d2a602f697a649798554e4f072115438e92249624e532e8aca6/jupyter_server_terminals-0.5.3-py3-none-any.whl", hash = "sha256:41ee0d7dc0ebf2809c668e0fc726dfaf258fcd3e769568996ca731b6194ae9aa", size = 13656, upload-time = "2024-03-12T14:37:00.708Z" }, +] + +[[package]] +name = "jupyterlab" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "async-lru" }, + { name = "httpx" }, + { name = "ipykernel" }, + { name = "jinja2" }, + { name = "jupyter-core" }, + { name = "jupyter-lsp" }, + { name = "jupyter-server" }, + { name = "jupyterlab-server" }, + { name = "notebook-shim" }, + { name = "packaging" }, + { name = "setuptools" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/09/21/413d142686a4e8f4268d985becbdb4daf060524726248e73be4773786987/jupyterlab-4.5.1.tar.gz", hash = "sha256:09da1ddfbd9eec18b5101dbb8515612aa1e47443321fb99503725a88e93d20d9", size = 23992251, upload-time = "2025-12-15T16:58:59.361Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/c3/acced767eecc11a70c65c45295db5396c4f0c1937874937d5a76d7b177b6/jupyterlab-4.5.1-py3-none-any.whl", hash = "sha256:31b059de96de0754ff1f2ce6279774b6aab8c34d7082e9752db58207c99bd514", size = 12384821, upload-time = "2025-12-15T16:58:55.563Z" }, +] + +[[package]] +name = "jupyterlab-pygments" +version = "0.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/90/51/9187be60d989df97f5f0aba133fa54e7300f17616e065d1ada7d7646b6d6/jupyterlab_pygments-0.3.0.tar.gz", hash = 
"sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d", size = 512900, upload-time = "2023-11-23T09:26:37.44Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/dd/ead9d8ea85bf202d90cc513b533f9c363121c7792674f78e0d8a854b63b4/jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780", size = 15884, upload-time = "2023-11-23T09:26:34.325Z" }, +] + +[[package]] +name = "jupyterlab-server" +version = "2.28.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "babel" }, + { name = "jinja2" }, + { name = "json5" }, + { name = "jsonschema" }, + { name = "jupyter-server" }, + { name = "packaging" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/2c/90153f189e421e93c4bb4f9e3f59802a1f01abd2ac5cf40b152d7f735232/jupyterlab_server-2.28.0.tar.gz", hash = "sha256:35baa81898b15f93573e2deca50d11ac0ae407ebb688299d3a5213265033712c", size = 76996, upload-time = "2025-10-22T13:59:18.37Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/07/a000fe835f76b7e1143242ab1122e6362ef1c03f23f83a045c38859c2ae0/jupyterlab_server-2.28.0-py3-none-any.whl", hash = "sha256:e4355b148fdcf34d312bbbc80f22467d6d20460e8b8736bf235577dd18506968", size = 59830, upload-time = "2025-10-22T13:59:16.767Z" }, +] + +[[package]] +name = "jupyterlab-widgets" +version = "3.0.16" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/2d/ef58fed122b268c69c0aa099da20bc67657cdfb2e222688d5731bd5b971d/jupyterlab_widgets-3.0.16.tar.gz", hash = "sha256:423da05071d55cf27a9e602216d35a3a65a3e41cdf9c5d3b643b814ce38c19e0", size = 897423, upload-time = "2025-11-01T21:11:29.724Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/b5/36c712098e6191d1b4e349304ef73a8d06aed77e56ceaac8c0a306c7bda1/jupyterlab_widgets-3.0.16-py3-none-any.whl", hash = "sha256:45fa36d9c6422cf2559198e4db481aa243c7a32d9926b500781c830c80f7ecf8", size = 914926, upload-time = "2025-11-01T21:11:28.008Z" }, +] + +[[package]] +name = "kiwisolver" +version = "1.4.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5c/3c/85844f1b0feb11ee581ac23fe5fce65cd049a200c1446708cc1b7f922875/kiwisolver-1.4.9.tar.gz", hash = "sha256:c3b22c26c6fd6811b0ae8363b95ca8ce4ea3c202d3d0975b2914310ceb1bcc4d", size = 97564, upload-time = "2025-08-10T21:27:49.279Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/ab/c80b0d5a9d8a1a65f4f815f2afff9798b12c3b9f31f1d304dd233dd920e2/kiwisolver-1.4.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eb14a5da6dc7642b0f3a18f13654847cd8b7a2550e2645a5bda677862b03ba16", size = 124167, upload-time = "2025-08-10T21:25:53.403Z" }, + { url = "https://files.pythonhosted.org/packages/a0/c0/27fe1a68a39cf62472a300e2879ffc13c0538546c359b86f149cc19f6ac3/kiwisolver-1.4.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39a219e1c81ae3b103643d2aedb90f1ef22650deb266ff12a19e7773f3e5f089", size = 66579, upload-time = "2025-08-10T21:25:54.79Z" }, + { url = "https://files.pythonhosted.org/packages/31/a2/a12a503ac1fd4943c50f9822678e8015a790a13b5490354c68afb8489814/kiwisolver-1.4.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2405a7d98604b87f3fc28b1716783534b1b4b8510d8142adca34ee0bc3c87543", size = 65309, upload-time = "2025-08-10T21:25:55.76Z" }, + { url = 
"https://files.pythonhosted.org/packages/66/e1/e533435c0be77c3f64040d68d7a657771194a63c279f55573188161e81ca/kiwisolver-1.4.9-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dc1ae486f9abcef254b5618dfb4113dd49f94c68e3e027d03cf0143f3f772b61", size = 1435596, upload-time = "2025-08-10T21:25:56.861Z" }, + { url = "https://files.pythonhosted.org/packages/67/1e/51b73c7347f9aabdc7215aa79e8b15299097dc2f8e67dee2b095faca9cb0/kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a1f570ce4d62d718dce3f179ee78dac3b545ac16c0c04bb363b7607a949c0d1", size = 1246548, upload-time = "2025-08-10T21:25:58.246Z" }, + { url = "https://files.pythonhosted.org/packages/21/aa/72a1c5d1e430294f2d32adb9542719cfb441b5da368d09d268c7757af46c/kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb27e7b78d716c591e88e0a09a2139c6577865d7f2e152488c2cc6257f460872", size = 1263618, upload-time = "2025-08-10T21:25:59.857Z" }, + { url = "https://files.pythonhosted.org/packages/a3/af/db1509a9e79dbf4c260ce0cfa3903ea8945f6240e9e59d1e4deb731b1a40/kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:15163165efc2f627eb9687ea5f3a28137217d217ac4024893d753f46bce9de26", size = 1317437, upload-time = "2025-08-10T21:26:01.105Z" }, + { url = "https://files.pythonhosted.org/packages/e0/f2/3ea5ee5d52abacdd12013a94130436e19969fa183faa1e7c7fbc89e9a42f/kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bdee92c56a71d2b24c33a7d4c2856bd6419d017e08caa7802d2963870e315028", size = 2195742, upload-time = "2025-08-10T21:26:02.675Z" }, + { url = "https://files.pythonhosted.org/packages/6f/9b/1efdd3013c2d9a2566aa6a337e9923a00590c516add9a1e89a768a3eb2fc/kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:412f287c55a6f54b0650bd9b6dce5aceddb95864a1a90c87af16979d37c89771", size = 2290810, upload-time = "2025-08-10T21:26:04.009Z" }, + { url = "https://files.pythonhosted.org/packages/fb/e5/cfdc36109ae4e67361f9bc5b41323648cb24a01b9ade18784657e022e65f/kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2c93f00dcba2eea70af2be5f11a830a742fe6b579a1d4e00f47760ef13be247a", size = 2461579, upload-time = "2025-08-10T21:26:05.317Z" }, + { url = "https://files.pythonhosted.org/packages/62/86/b589e5e86c7610842213994cdea5add00960076bef4ae290c5fa68589cac/kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f117e1a089d9411663a3207ba874f31be9ac8eaa5b533787024dc07aeb74f464", size = 2268071, upload-time = "2025-08-10T21:26:06.686Z" }, + { url = "https://files.pythonhosted.org/packages/3b/c6/f8df8509fd1eee6c622febe54384a96cfaf4d43bf2ccec7a0cc17e4715c9/kiwisolver-1.4.9-cp311-cp311-win_amd64.whl", hash = "sha256:be6a04e6c79819c9a8c2373317d19a96048e5a3f90bec587787e86a1153883c2", size = 73840, upload-time = "2025-08-10T21:26:07.94Z" }, + { url = "https://files.pythonhosted.org/packages/e2/2d/16e0581daafd147bc11ac53f032a2b45eabac897f42a338d0a13c1e5c436/kiwisolver-1.4.9-cp311-cp311-win_arm64.whl", hash = "sha256:0ae37737256ba2de764ddc12aed4956460277f00c4996d51a197e72f62f5eec7", size = 65159, upload-time = "2025-08-10T21:26:09.048Z" }, + { url = "https://files.pythonhosted.org/packages/a3/0f/36d89194b5a32c054ce93e586d4049b6c2c22887b0eb229c61c68afd3078/kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:720e05574713db64c356e86732c0f3c5252818d05f9df320f0ad8380641acea5", size = 60104, upload-time = "2025-08-10T21:27:43.287Z" }, + { url = 
"https://files.pythonhosted.org/packages/52/ba/4ed75f59e4658fd21fe7dde1fee0ac397c678ec3befba3fe6482d987af87/kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:17680d737d5335b552994a2008fab4c851bcd7de33094a82067ef3a576ff02fa", size = 58592, upload-time = "2025-08-10T21:27:44.314Z" }, + { url = "https://files.pythonhosted.org/packages/33/01/a8ea7c5ea32a9b45ceeaee051a04c8ed4320f5add3c51bfa20879b765b70/kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:85b5352f94e490c028926ea567fc569c52ec79ce131dadb968d3853e809518c2", size = 80281, upload-time = "2025-08-10T21:27:45.369Z" }, + { url = "https://files.pythonhosted.org/packages/da/e3/dbd2ecdce306f1d07a1aaf324817ee993aab7aee9db47ceac757deabafbe/kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:464415881e4801295659462c49461a24fb107c140de781d55518c4b80cb6790f", size = 78009, upload-time = "2025-08-10T21:27:46.376Z" }, + { url = "https://files.pythonhosted.org/packages/da/e9/0d4add7873a73e462aeb45c036a2dead2562b825aa46ba326727b3f31016/kiwisolver-1.4.9-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:fb940820c63a9590d31d88b815e7a3aa5915cad3ce735ab45f0c730b39547de1", size = 73929, upload-time = "2025-08-10T21:27:48.236Z" }, +] + +[[package]] +name = "lark" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/da/34/28fff3ab31ccff1fd4f6c7c7b0ceb2b6968d8ea4950663eadcb5720591a0/lark-1.3.1.tar.gz", hash = "sha256:b426a7a6d6d53189d318f2b6236ab5d6429eaf09259f1ca33eb716eed10d2905", size = 382732, upload-time = "2025-10-27T18:25:56.653Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/3d/14ce75ef66813643812f3093ab17e46d3a206942ce7376d31ec2d36229e7/lark-1.3.1-py3-none-any.whl", hash = "sha256:c629b661023a014c37da873b4ff58a817398d12635d3bbb2c5a03be7fe5d1e12", size = 113151, upload-time = "2025-10-27T18:25:54.882Z" }, +] + +[[package]] +name = "lazy-loader" +version = "0.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6f/6b/c875b30a1ba490860c93da4cabf479e03f584eba06fe5963f6f6644653d8/lazy_loader-0.4.tar.gz", hash = "sha256:47c75182589b91a4e1a85a136c074285a5ad4d9f39c63e0d7fb76391c4574cd1", size = 15431, upload-time = "2024-04-05T13:03:12.261Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/60/d497a310bde3f01cb805196ac61b7ad6dc5dcf8dce66634dc34364b20b4f/lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc", size = 12097, upload-time = "2024-04-05T13:03:10.514Z" }, +] + +[[package]] +name = "lightning-utilities" +version = "0.15.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, + { name = "setuptools" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b8/39/6fc58ca81492db047149b4b8fd385aa1bfb8c28cd7cacb0c7eb0c44d842f/lightning_utilities-0.15.2.tar.gz", hash = "sha256:cdf12f530214a63dacefd713f180d1ecf5d165338101617b4742e8f22c032e24", size = 31090, upload-time = "2025-08-06T13:57:39.242Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/de/73/3d757cb3fc16f0f9794dd289bcd0c4a031d9cf54d8137d6b984b2d02edf3/lightning_utilities-0.15.2-py3-none-any.whl", hash = "sha256:ad3ab1703775044bbf880dbf7ddaaac899396c96315f3aa1779cec9d618a9841", size = 29431, 
upload-time = "2025-08-06T13:57:38.046Z" }, +] + +[[package]] +name = "lmfit" +version = "1.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asteval" }, + { name = "dill" }, + { name = "numpy" }, + { name = "scipy" }, + { name = "uncertainties" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/e5/a35942aed2de95e228728c34609b51fe3ec9182398eac50d288eef313aa2/lmfit-1.3.4.tar.gz", hash = "sha256:3c22c28c43f717f6c5b4a3bd81e893a2149739c26a592c046f2e33c23cfbe497", size = 630720, upload-time = "2025-07-19T20:09:01.876Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/7e/7b91c89a4cf0f543a83be978657afb20c86af6d725253e319589dcc4ce52/lmfit-1.3.4-py3-none-any.whl", hash = "sha256:afce1593b42324d37ae2908249b0c55445e2f4c1a0474ff706a8e2f7b5d949fa", size = 97662, upload-time = "2025-07-19T20:09:00.32Z" }, +] + +[[package]] +name = "lxml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/88/262177de60548e5a2bfc46ad28232c9e9cbde697bd94132aeb80364675cb/lxml-6.0.2.tar.gz", hash = "sha256:cd79f3367bd74b317dda655dc8fcfa304d9eb6e4fb06b7168c5cf27f96e0cd62", size = 4073426, upload-time = "2025-09-22T04:04:59.287Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/d5/becbe1e2569b474a23f0c672ead8a29ac50b2dc1d5b9de184831bda8d14c/lxml-6.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:13e35cbc684aadf05d8711a5d1b5857c92e5e580efa9a0d2be197199c8def607", size = 8634365, upload-time = "2025-09-22T04:00:45.672Z" }, + { url = "https://files.pythonhosted.org/packages/28/66/1ced58f12e804644426b85d0bb8a4478ca77bc1761455da310505f1a3526/lxml-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b1675e096e17c6fe9c0e8c81434f5736c0739ff9ac6123c87c2d452f48fc938", size = 4650793, upload-time = "2025-09-22T04:00:47.783Z" }, + { url = "https://files.pythonhosted.org/packages/11/84/549098ffea39dfd167e3f174b4ce983d0eed61f9d8d25b7bf2a57c3247fc/lxml-6.0.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8ac6e5811ae2870953390452e3476694196f98d447573234592d30488147404d", size = 4944362, upload-time = "2025-09-22T04:00:49.845Z" }, + { url = "https://files.pythonhosted.org/packages/ac/bd/f207f16abf9749d2037453d56b643a7471d8fde855a231a12d1e095c4f01/lxml-6.0.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5aa0fc67ae19d7a64c3fe725dc9a1bb11f80e01f78289d05c6f62545affec438", size = 5083152, upload-time = "2025-09-22T04:00:51.709Z" }, + { url = "https://files.pythonhosted.org/packages/15/ae/bd813e87d8941d52ad5b65071b1affb48da01c4ed3c9c99e40abb266fbff/lxml-6.0.2-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de496365750cc472b4e7902a485d3f152ecf57bd3ba03ddd5578ed8ceb4c5964", size = 5023539, upload-time = "2025-09-22T04:00:53.593Z" }, + { url = "https://files.pythonhosted.org/packages/02/cd/9bfef16bd1d874fbe0cb51afb00329540f30a3283beb9f0780adbb7eec03/lxml-6.0.2-cp311-cp311-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:200069a593c5e40b8f6fc0d84d86d970ba43138c3e68619ffa234bc9bb806a4d", size = 5344853, upload-time = "2025-09-22T04:00:55.524Z" }, + { url = "https://files.pythonhosted.org/packages/b8/89/ea8f91594bc5dbb879734d35a6f2b0ad50605d7fb419de2b63d4211765cc/lxml-6.0.2-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7d2de809c2ee3b888b59f995625385f74629707c9355e0ff856445cdcae682b7", size = 5225133, upload-time = "2025-09-22T04:00:57.269Z" 
}, + { url = "https://files.pythonhosted.org/packages/b9/37/9c735274f5dbec726b2db99b98a43950395ba3d4a1043083dba2ad814170/lxml-6.0.2-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:b2c3da8d93cf5db60e8858c17684c47d01fee6405e554fb55018dd85fc23b178", size = 4677944, upload-time = "2025-09-22T04:00:59.052Z" }, + { url = "https://files.pythonhosted.org/packages/20/28/7dfe1ba3475d8bfca3878365075abe002e05d40dfaaeb7ec01b4c587d533/lxml-6.0.2-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:442de7530296ef5e188373a1ea5789a46ce90c4847e597856570439621d9c553", size = 5284535, upload-time = "2025-09-22T04:01:01.335Z" }, + { url = "https://files.pythonhosted.org/packages/e7/cf/5f14bc0de763498fc29510e3532bf2b4b3a1c1d5d0dff2e900c16ba021ef/lxml-6.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2593c77efde7bfea7f6389f1ab249b15ed4aa5bc5cb5131faa3b843c429fbedb", size = 5067343, upload-time = "2025-09-22T04:01:03.13Z" }, + { url = "https://files.pythonhosted.org/packages/1c/b0/bb8275ab5472f32b28cfbbcc6db7c9d092482d3439ca279d8d6fa02f7025/lxml-6.0.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:3e3cb08855967a20f553ff32d147e14329b3ae70ced6edc2f282b94afbc74b2a", size = 4725419, upload-time = "2025-09-22T04:01:05.013Z" }, + { url = "https://files.pythonhosted.org/packages/25/4c/7c222753bc72edca3b99dbadba1b064209bc8ed4ad448af990e60dcce462/lxml-6.0.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ed6c667fcbb8c19c6791bbf40b7268ef8ddf5a96940ba9404b9f9a304832f6c", size = 5275008, upload-time = "2025-09-22T04:01:07.327Z" }, + { url = "https://files.pythonhosted.org/packages/6c/8c/478a0dc6b6ed661451379447cdbec77c05741a75736d97e5b2b729687828/lxml-6.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b8f18914faec94132e5b91e69d76a5c1d7b0c73e2489ea8929c4aaa10b76bbf7", size = 5248906, upload-time = "2025-09-22T04:01:09.452Z" }, + { url = "https://files.pythonhosted.org/packages/2d/d9/5be3a6ab2784cdf9accb0703b65e1b64fcdd9311c9f007630c7db0cfcce1/lxml-6.0.2-cp311-cp311-win32.whl", hash = "sha256:6605c604e6daa9e0d7f0a2137bdc47a2e93b59c60a65466353e37f8272f47c46", size = 3610357, upload-time = "2025-09-22T04:01:11.102Z" }, + { url = "https://files.pythonhosted.org/packages/e2/7d/ca6fb13349b473d5732fb0ee3eec8f6c80fc0688e76b7d79c1008481bf1f/lxml-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e5867f2651016a3afd8dd2c8238baa66f1e2802f44bc17e236f547ace6647078", size = 4036583, upload-time = "2025-09-22T04:01:12.766Z" }, + { url = "https://files.pythonhosted.org/packages/ab/a2/51363b5ecd3eab46563645f3a2c3836a2fc67d01a1b87c5017040f39f567/lxml-6.0.2-cp311-cp311-win_arm64.whl", hash = "sha256:4197fb2534ee05fd3e7afaab5d8bfd6c2e186f65ea7f9cd6a82809c887bd1285", size = 3680591, upload-time = "2025-09-22T04:01:14.874Z" }, + { url = "https://files.pythonhosted.org/packages/0b/11/29d08bc103a62c0eba8016e7ed5aeebbf1e4312e83b0b1648dd203b0e87d/lxml-6.0.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1c06035eafa8404b5cf475bb37a9f6088b0aca288d4ccc9d69389750d5543700", size = 3949829, upload-time = "2025-09-22T04:04:45.608Z" }, + { url = "https://files.pythonhosted.org/packages/12/b3/52ab9a3b31e5ab8238da241baa19eec44d2ab426532441ee607165aebb52/lxml-6.0.2-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c7d13103045de1bdd6fe5d61802565f1a3537d70cd3abf596aa0af62761921ee", size = 4226277, upload-time = "2025-09-22T04:04:47.754Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/33/1eaf780c1baad88224611df13b1c2a9dfa460b526cacfe769103ff50d845/lxml-6.0.2-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a3c150a95fbe5ac91de323aa756219ef9cf7fde5a3f00e2281e30f33fa5fa4f", size = 4330433, upload-time = "2025-09-22T04:04:49.907Z" }, + { url = "https://files.pythonhosted.org/packages/7a/c1/27428a2ff348e994ab4f8777d3a0ad510b6b92d37718e5887d2da99952a2/lxml-6.0.2-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:60fa43be34f78bebb27812ed90f1925ec99560b0fa1decdb7d12b84d857d31e9", size = 4272119, upload-time = "2025-09-22T04:04:51.801Z" }, + { url = "https://files.pythonhosted.org/packages/f0/d0/3020fa12bcec4ab62f97aab026d57c2f0cfd480a558758d9ca233bb6a79d/lxml-6.0.2-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:21c73b476d3cfe836be731225ec3421fa2f048d84f6df6a8e70433dff1376d5a", size = 4417314, upload-time = "2025-09-22T04:04:55.024Z" }, + { url = "https://files.pythonhosted.org/packages/6c/77/d7f491cbc05303ac6801651aabeb262d43f319288c1ea96c66b1d2692ff3/lxml-6.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:27220da5be049e936c3aca06f174e8827ca6445a4353a1995584311487fc4e3e", size = 3518768, upload-time = "2025-09-22T04:04:57.097Z" }, +] + +[[package]] +name = "lz4" +version = "4.4.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/51/f1b86d93029f418033dddf9b9f79c8d2641e7454080478ee2aab5123173e/lz4-4.4.5.tar.gz", hash = "sha256:5f0b9e53c1e82e88c10d7c180069363980136b9d7a8306c4dca4f760d60c39f0", size = 172886, upload-time = "2025-11-03T13:02:36.061Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/5b/6edcd23319d9e28b1bedf32768c3d1fd56eed8223960a2c47dacd2cec2af/lz4-4.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d6da84a26b3aa5da13a62e4b89ab36a396e9327de8cd48b436a3467077f8ccd4", size = 207391, upload-time = "2025-11-03T13:01:36.644Z" }, + { url = "https://files.pythonhosted.org/packages/34/36/5f9b772e85b3d5769367a79973b8030afad0d6b724444083bad09becd66f/lz4-4.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61d0ee03e6c616f4a8b69987d03d514e8896c8b1b7cc7598ad029e5c6aedfd43", size = 207146, upload-time = "2025-11-03T13:01:37.928Z" }, + { url = "https://files.pythonhosted.org/packages/04/f4/f66da5647c0d72592081a37c8775feacc3d14d2625bbdaabd6307c274565/lz4-4.4.5-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:33dd86cea8375d8e5dd001e41f321d0a4b1eb7985f39be1b6a4f466cd480b8a7", size = 1292623, upload-time = "2025-11-03T13:01:39.341Z" }, + { url = "https://files.pythonhosted.org/packages/85/fc/5df0f17467cdda0cad464a9197a447027879197761b55faad7ca29c29a04/lz4-4.4.5-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:609a69c68e7cfcfa9d894dc06be13f2e00761485b62df4e2472f1b66f7b405fb", size = 1279982, upload-time = "2025-11-03T13:01:40.816Z" }, + { url = "https://files.pythonhosted.org/packages/25/3b/b55cb577aa148ed4e383e9700c36f70b651cd434e1c07568f0a86c9d5fbb/lz4-4.4.5-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:75419bb1a559af00250b8f1360d508444e80ed4b26d9d40ec5b09fe7875cb989", size = 1368674, upload-time = "2025-11-03T13:01:42.118Z" }, + { url = "https://files.pythonhosted.org/packages/fb/31/e97e8c74c59ea479598e5c55cbe0b1334f03ee74ca97726e872944ed42df/lz4-4.4.5-cp311-cp311-win32.whl", hash = 
"sha256:12233624f1bc2cebc414f9efb3113a03e89acce3ab6f72035577bc61b270d24d", size = 88168, upload-time = "2025-11-03T13:01:43.282Z" }, + { url = "https://files.pythonhosted.org/packages/18/47/715865a6c7071f417bef9b57c8644f29cb7a55b77742bd5d93a609274e7e/lz4-4.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:8a842ead8ca7c0ee2f396ca5d878c4c40439a527ebad2b996b0444f0074ed004", size = 99491, upload-time = "2025-11-03T13:01:44.167Z" }, + { url = "https://files.pythonhosted.org/packages/14/e7/ac120c2ca8caec5c945e6356ada2aa5cfabd83a01e3170f264a5c42c8231/lz4-4.4.5-cp311-cp311-win_arm64.whl", hash = "sha256:83bc23ef65b6ae44f3287c38cbf82c269e2e96a26e560aa551735883388dcc4b", size = 91271, upload-time = "2025-11-03T13:01:45.016Z" }, +] + +[[package]] +name = "markdown" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/ab/7dd27d9d863b3376fcf23a5a13cb5d024aed1db46f963f1b5735ae43b3be/markdown-3.10.tar.gz", hash = "sha256:37062d4f2aa4b2b6b32aefb80faa300f82cc790cb949a35b8caede34f2b68c0e", size = 364931, upload-time = "2025-11-03T19:51:15.007Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/81/54e3ce63502cd085a0c556652a4e1b919c45a446bd1e5300e10c44c8c521/markdown-3.10-py3-none-any.whl", hash = "sha256:b5b99d6951e2e4948d939255596523444c0e677c669700b1d17aa4a8a464cb7c", size = 107678, upload-time = "2025-11-03T19:51:13.887Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/db/fefacb2136439fc8dd20e797950e749aa1f4997ed584c62cfb8ef7c2be0e/markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad", size = 11631, upload-time = "2025-09-27T18:36:18.185Z" }, + { url = "https://files.pythonhosted.org/packages/e1/2e/5898933336b61975ce9dc04decbc0a7f2fee78c30353c5efba7f2d6ff27a/markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a", size = 12058, upload-time = "2025-09-27T18:36:19.444Z" }, + { url = "https://files.pythonhosted.org/packages/1d/09/adf2df3699d87d1d8184038df46a9c80d78c0148492323f4693df54e17bb/markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50", size = 24287, upload-time = "2025-09-27T18:36:20.768Z" }, + { url = "https://files.pythonhosted.org/packages/30/ac/0273f6fcb5f42e314c6d8cd99effae6a5354604d461b8d392b5ec9530a54/markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf", size = 22940, upload-time = "2025-09-27T18:36:22.249Z" }, + { url = "https://files.pythonhosted.org/packages/19/ae/31c1be199ef767124c042c6c3e904da327a2f7f0cd63a0337e1eca2967a8/markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f", size = 21887, upload-time = "2025-09-27T18:36:23.535Z" }, + { url = 
"https://files.pythonhosted.org/packages/b2/76/7edcab99d5349a4532a459e1fe64f0b0467a3365056ae550d3bcf3f79e1e/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a", size = 23692, upload-time = "2025-09-27T18:36:24.823Z" }, + { url = "https://files.pythonhosted.org/packages/a4/28/6e74cdd26d7514849143d69f0bf2399f929c37dc2b31e6829fd2045b2765/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115", size = 21471, upload-time = "2025-09-27T18:36:25.95Z" }, + { url = "https://files.pythonhosted.org/packages/62/7e/a145f36a5c2945673e590850a6f8014318d5577ed7e5920a4b3448e0865d/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a", size = 22923, upload-time = "2025-09-27T18:36:27.109Z" }, + { url = "https://files.pythonhosted.org/packages/0f/62/d9c46a7f5c9adbeeeda52f5b8d802e1094e9717705a645efc71b0913a0a8/markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19", size = 14572, upload-time = "2025-09-27T18:36:28.045Z" }, + { url = "https://files.pythonhosted.org/packages/83/8a/4414c03d3f891739326e1783338e48fb49781cc915b2e0ee052aa490d586/markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01", size = 15077, upload-time = "2025-09-27T18:36:29.025Z" }, + { url = "https://files.pythonhosted.org/packages/35/73/893072b42e6862f319b5207adc9ae06070f095b358655f077f69a35601f0/markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c", size = 13876, upload-time = "2025-09-27T18:36:29.954Z" }, +] + +[[package]] +name = "matplotlib" +version = "3.10.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy" }, + { name = "cycler" }, + { name = "fonttools" }, + { name = "kiwisolver" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "pyparsing" }, + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ae/e2/d2d5295be2f44c678ebaf3544ba32d20c1f9ef08c49fe47f496180e1db15/matplotlib-3.10.7.tar.gz", hash = "sha256:a06ba7e2a2ef9131c79c49e63dad355d2d878413a0376c1727c8b9335ff731c7", size = 34804865, upload-time = "2025-10-09T00:28:00.669Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/bc/0fb489005669127ec13f51be0c6adc074d7cf191075dab1da9fe3b7a3cfc/matplotlib-3.10.7-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:53b492410a6cd66c7a471de6c924f6ede976e963c0f3097a3b7abfadddc67d0a", size = 8257507, upload-time = "2025-10-09T00:26:19.073Z" }, + { url = "https://files.pythonhosted.org/packages/e2/6a/d42588ad895279ff6708924645b5d2ed54a7fb2dc045c8a804e955aeace1/matplotlib-3.10.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d9749313deb729f08207718d29c86246beb2ea3fdba753595b55901dee5d2fd6", size = 8119565, upload-time = "2025-10-09T00:26:21.023Z" }, + { url = "https://files.pythonhosted.org/packages/10/b7/4aa196155b4d846bd749cf82aa5a4c300cf55a8b5e0dfa5b722a63c0f8a0/matplotlib-3.10.7-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2222c7ba2cbde7fe63032769f6eb7e83ab3227f47d997a8453377709b7fe3a5a", size = 8692668, upload-time = "2025-10-09T00:26:22.967Z" }, + { url = 
"https://files.pythonhosted.org/packages/e6/e7/664d2b97016f46683a02d854d730cfcf54ff92c1dafa424beebef50f831d/matplotlib-3.10.7-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e91f61a064c92c307c5a9dc8c05dc9f8a68f0a3be199d9a002a0622e13f874a1", size = 9521051, upload-time = "2025-10-09T00:26:25.041Z" }, + { url = "https://files.pythonhosted.org/packages/a8/a3/37aef1404efa615f49b5758a5e0261c16dd88f389bc1861e722620e4a754/matplotlib-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6f1851eab59ca082c95df5a500106bad73672645625e04538b3ad0f69471ffcc", size = 9576878, upload-time = "2025-10-09T00:26:27.478Z" }, + { url = "https://files.pythonhosted.org/packages/33/cd/b145f9797126f3f809d177ca378de57c45413c5099c5990de2658760594a/matplotlib-3.10.7-cp311-cp311-win_amd64.whl", hash = "sha256:6516ce375109c60ceec579e699524e9d504cd7578506f01150f7a6bc174a775e", size = 8115142, upload-time = "2025-10-09T00:26:29.774Z" }, + { url = "https://files.pythonhosted.org/packages/2e/39/63bca9d2b78455ed497fcf51a9c71df200a11048f48249038f06447fa947/matplotlib-3.10.7-cp311-cp311-win_arm64.whl", hash = "sha256:b172db79759f5f9bc13ef1c3ef8b9ee7b37b0247f987fbbbdaa15e4f87fd46a9", size = 7992439, upload-time = "2025-10-09T00:26:40.32Z" }, + { url = "https://files.pythonhosted.org/packages/58/8f/76d5dc21ac64a49e5498d7f0472c0781dae442dd266a67458baec38288ec/matplotlib-3.10.7-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:15112bcbaef211bd663fa935ec33313b948e214454d949b723998a43357b17b0", size = 8252283, upload-time = "2025-10-09T00:27:54.739Z" }, + { url = "https://files.pythonhosted.org/packages/27/0d/9c5d4c2317feb31d819e38c9f947c942f42ebd4eb935fc6fd3518a11eaa7/matplotlib-3.10.7-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d2a959c640cdeecdd2ec3136e8ea0441da59bcaf58d67e9c590740addba2cb68", size = 8116733, upload-time = "2025-10-09T00:27:56.406Z" }, + { url = "https://files.pythonhosted.org/packages/9a/cc/3fe688ff1355010937713164caacf9ed443675ac48a997bab6ed23b3f7c0/matplotlib-3.10.7-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3886e47f64611046bc1db523a09dd0a0a6bed6081e6f90e13806dd1d1d1b5e91", size = 8693919, upload-time = "2025-10-09T00:27:58.41Z" }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c7/74/97e72a36efd4ae2bccb3463284300f8953f199b5ffbc04cbbb0ec78f74b1/matplotlib_inline-0.2.1.tar.gz", hash = "sha256:e1ee949c340d771fc39e241ea75683deb94762c8fa5f2927ec57c83c4dffa9fe", size = 8110, upload-time = "2025-10-23T09:00:22.126Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/33/ee4519fa02ed11a94aef9559552f3b17bb863f2ecfe1a35dc7f548cde231/matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76", size = 9516, upload-time = "2025-10-23T09:00:20.675Z" }, +] + +[[package]] +name = "mistune" +version = "3.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/02/a7fb8b21d4d55ac93cdcde9d3638da5dd0ebdd3a4fed76c7725e10b81cbe/mistune-3.1.4.tar.gz", hash = "sha256:b5a7f801d389f724ec702840c11d8fc48f2b33519102fc7ee739e8177b672164", size = 94588, upload-time = "2025-08-29T07:20:43.594Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/7a/f0/8282d9641415e9e33df173516226b404d367a0fc55e1a60424a152913abc/mistune-3.1.4-py3-none-any.whl", hash = "sha256:93691da911e5d9d2e23bc54472892aff676df27a75274962ff9edc210364266d", size = 53481, upload-time = "2025-08-29T07:20:42.218Z" }, +] + +[[package]] +name = "ml-dtypes" +version = "0.5.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/4a/c27b42ed9b1c7d13d9ba8b6905dece787d6259152f2309338aed29b2447b/ml_dtypes-0.5.4.tar.gz", hash = "sha256:8ab06a50fb9bf9666dd0fe5dfb4676fa2b0ac0f31ecff72a6c3af8e22c063453", size = 692314, upload-time = "2025-11-17T22:32:31.031Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/5e/712092cfe7e5eb667b8ad9ca7c54442f21ed7ca8979745f1000e24cf8737/ml_dtypes-0.5.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6c7ecb74c4bd71db68a6bea1edf8da8c34f3d9fe218f038814fd1d310ac76c90", size = 679734, upload-time = "2025-11-17T22:31:39.223Z" }, + { url = "https://files.pythonhosted.org/packages/4f/cf/912146dfd4b5c0eea956836c01dcd2fce6c9c844b2691f5152aca196ce4f/ml_dtypes-0.5.4-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc11d7e8c44a65115d05e2ab9989d1e045125d7be8e05a071a48bc76eb6d6040", size = 5056165, upload-time = "2025-11-17T22:31:41.071Z" }, + { url = "https://files.pythonhosted.org/packages/a9/80/19189ea605017473660e43762dc853d2797984b3c7bf30ce656099add30c/ml_dtypes-0.5.4-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:19b9a53598f21e453ea2fbda8aa783c20faff8e1eeb0d7ab899309a0053f1483", size = 5034975, upload-time = "2025-11-17T22:31:42.758Z" }, + { url = "https://files.pythonhosted.org/packages/b4/24/70bd59276883fdd91600ca20040b41efd4902a923283c4d6edcb1de128d2/ml_dtypes-0.5.4-cp311-cp311-win_amd64.whl", hash = "sha256:7c23c54a00ae43edf48d44066a7ec31e05fdc2eee0be2b8b50dd1903a1db94bb", size = 210742, upload-time = "2025-11-17T22:31:44.068Z" }, + { url = "https://files.pythonhosted.org/packages/a0/c9/64230ef14e40aa3f1cb254ef623bf812735e6bec7772848d19131111ac0d/ml_dtypes-0.5.4-cp311-cp311-win_arm64.whl", hash = "sha256:557a31a390b7e9439056644cb80ed0735a6e3e3bb09d67fd5687e4b04238d1de", size = 160709, upload-time = "2025-11-17T22:31:46.557Z" }, +] + +[[package]] +name = "more-itertools" +version = "10.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ea/5d/38b681d3fce7a266dd9ab73c66959406d565b3e85f21d5e66e1181d93721/more_itertools-10.8.0.tar.gz", hash = "sha256:f638ddf8a1a0d134181275fb5d58b086ead7c6a72429ad725c67503f13ba30bd", size = 137431, upload-time = "2025-09-02T15:23:11.018Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/8e/469e5a4a2f5855992e425f3cb33804cc07bf18d48f2db061aec61ce50270/more_itertools-10.8.0-py3-none-any.whl", hash = "sha256:52d4362373dcf7c52546bc4af9a86ee7c4579df9a8dc268be0a2f949d376cc9b", size = 69667, upload-time = "2025-09-02T15:23:09.635Z" }, +] + +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" }, +] + +[[package]] +name = "msgpack" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4d/f2/bfb55a6236ed8725a96b0aa3acbd0ec17588e6a2c3b62a93eb513ed8783f/msgpack-1.1.2.tar.gz", hash = "sha256:3b60763c1373dd60f398488069bcdc703cd08a711477b5d480eecc9f9626f47e", size = 173581, upload-time = "2025-10-08T09:15:56.596Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/97/560d11202bcd537abca693fd85d81cebe2107ba17301de42b01ac1677b69/msgpack-1.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2e86a607e558d22985d856948c12a3fa7b42efad264dca8a3ebbcfa2735d786c", size = 82271, upload-time = "2025-10-08T09:14:49.967Z" }, + { url = "https://files.pythonhosted.org/packages/83/04/28a41024ccbd67467380b6fb440ae916c1e4f25e2cd4c63abe6835ac566e/msgpack-1.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:283ae72fc89da59aa004ba147e8fc2f766647b1251500182fac0350d8af299c0", size = 84914, upload-time = "2025-10-08T09:14:50.958Z" }, + { url = "https://files.pythonhosted.org/packages/71/46/b817349db6886d79e57a966346cf0902a426375aadc1e8e7a86a75e22f19/msgpack-1.1.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:61c8aa3bd513d87c72ed0b37b53dd5c5a0f58f2ff9f26e1555d3bd7948fb7296", size = 416962, upload-time = "2025-10-08T09:14:51.997Z" }, + { url = "https://files.pythonhosted.org/packages/da/e0/6cc2e852837cd6086fe7d8406af4294e66827a60a4cf60b86575a4a65ca8/msgpack-1.1.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:454e29e186285d2ebe65be34629fa0e8605202c60fbc7c4c650ccd41870896ef", size = 426183, upload-time = "2025-10-08T09:14:53.477Z" }, + { url = "https://files.pythonhosted.org/packages/25/98/6a19f030b3d2ea906696cedd1eb251708e50a5891d0978b012cb6107234c/msgpack-1.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7bc8813f88417599564fafa59fd6f95be417179f76b40325b500b3c98409757c", size = 411454, upload-time = "2025-10-08T09:14:54.648Z" }, + { url = "https://files.pythonhosted.org/packages/b7/cd/9098fcb6adb32187a70b7ecaabf6339da50553351558f37600e53a4a2a23/msgpack-1.1.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bafca952dc13907bdfdedfc6a5f579bf4f292bdd506fadb38389afa3ac5b208e", size = 422341, upload-time = "2025-10-08T09:14:56.328Z" }, + { url = "https://files.pythonhosted.org/packages/e6/ae/270cecbcf36c1dc85ec086b33a51a4d7d08fc4f404bdbc15b582255d05ff/msgpack-1.1.2-cp311-cp311-win32.whl", hash = "sha256:602b6740e95ffc55bfb078172d279de3773d7b7db1f703b2f1323566b878b90e", size = 64747, upload-time = "2025-10-08T09:14:57.882Z" }, + { url = "https://files.pythonhosted.org/packages/2a/79/309d0e637f6f37e83c711f547308b91af02b72d2326ddd860b966080ef29/msgpack-1.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:d198d275222dc54244bf3327eb8cbe00307d220241d9cec4d306d49a44e85f68", size = 71633, upload-time = "2025-10-08T09:14:59.177Z" }, + { url = "https://files.pythonhosted.org/packages/73/4d/7c4e2b3d9b1106cd0aa6cb56cc57c6267f59fa8bfab7d91df5adc802c847/msgpack-1.1.2-cp311-cp311-win_arm64.whl", hash = "sha256:86f8136dfa5c116365a8a651a7d7484b65b13339731dd6faebb9a0242151c406", size = 64755, upload-time = "2025-10-08T09:15:00.48Z" }, +] + 
+[[package]] +name = "multidict" +version = "6.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/9e/5c727587644d67b2ed479041e4b1c58e30afc011e3d45d25bbe35781217c/multidict-6.7.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4d409aa42a94c0b3fa617708ef5276dfe81012ba6753a0370fcc9d0195d0a1fc", size = 76604, upload-time = "2025-10-06T14:48:54.277Z" }, + { url = "https://files.pythonhosted.org/packages/17/e4/67b5c27bd17c085a5ea8f1ec05b8a3e5cba0ca734bfcad5560fb129e70ca/multidict-6.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14c9e076eede3b54c636f8ce1c9c252b5f057c62131211f0ceeec273810c9721", size = 44715, upload-time = "2025-10-06T14:48:55.445Z" }, + { url = "https://files.pythonhosted.org/packages/4d/e1/866a5d77be6ea435711bef2a4291eed11032679b6b28b56b4776ab06ba3e/multidict-6.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c09703000a9d0fa3c3404b27041e574cc7f4df4c6563873246d0e11812a94b6", size = 44332, upload-time = "2025-10-06T14:48:56.706Z" }, + { url = "https://files.pythonhosted.org/packages/31/61/0c2d50241ada71ff61a79518db85ada85fdabfcf395d5968dae1cbda04e5/multidict-6.7.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a265acbb7bb33a3a2d626afbe756371dce0279e7b17f4f4eda406459c2b5ff1c", size = 245212, upload-time = "2025-10-06T14:48:58.042Z" }, + { url = "https://files.pythonhosted.org/packages/ac/e0/919666a4e4b57fff1b57f279be1c9316e6cdc5de8a8b525d76f6598fefc7/multidict-6.7.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51cb455de290ae462593e5b1cb1118c5c22ea7f0d3620d9940bf695cea5a4bd7", size = 246671, upload-time = "2025-10-06T14:49:00.004Z" }, + { url = "https://files.pythonhosted.org/packages/a1/cc/d027d9c5a520f3321b65adea289b965e7bcbd2c34402663f482648c716ce/multidict-6.7.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:db99677b4457c7a5c5a949353e125ba72d62b35f74e26da141530fbb012218a7", size = 225491, upload-time = "2025-10-06T14:49:01.393Z" }, + { url = "https://files.pythonhosted.org/packages/75/c4/bbd633980ce6155a28ff04e6a6492dd3335858394d7bb752d8b108708558/multidict-6.7.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f470f68adc395e0183b92a2f4689264d1ea4b40504a24d9882c27375e6662bb9", size = 257322, upload-time = "2025-10-06T14:49:02.745Z" }, + { url = "https://files.pythonhosted.org/packages/4c/6d/d622322d344f1f053eae47e033b0b3f965af01212de21b10bcf91be991fb/multidict-6.7.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0db4956f82723cc1c270de9c6e799b4c341d327762ec78ef82bb962f79cc07d8", size = 254694, upload-time = "2025-10-06T14:49:04.15Z" }, + { url = "https://files.pythonhosted.org/packages/a8/9f/78f8761c2705d4c6d7516faed63c0ebdac569f6db1bef95e0d5218fdc146/multidict-6.7.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3e56d780c238f9e1ae66a22d2adf8d16f485381878250db8d496623cd38b22bd", size = 246715, upload-time = "2025-10-06T14:49:05.967Z" }, + { url = 
"https://files.pythonhosted.org/packages/78/59/950818e04f91b9c2b95aab3d923d9eabd01689d0dcd889563988e9ea0fd8/multidict-6.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9d14baca2ee12c1a64740d4531356ba50b82543017f3ad6de0deb943c5979abb", size = 243189, upload-time = "2025-10-06T14:49:07.37Z" }, + { url = "https://files.pythonhosted.org/packages/7a/3d/77c79e1934cad2ee74991840f8a0110966d9599b3af95964c0cd79bb905b/multidict-6.7.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:295a92a76188917c7f99cda95858c822f9e4aae5824246bba9b6b44004ddd0a6", size = 237845, upload-time = "2025-10-06T14:49:08.759Z" }, + { url = "https://files.pythonhosted.org/packages/63/1b/834ce32a0a97a3b70f86437f685f880136677ac00d8bce0027e9fd9c2db7/multidict-6.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39f1719f57adbb767ef592a50ae5ebb794220d1188f9ca93de471336401c34d2", size = 246374, upload-time = "2025-10-06T14:49:10.574Z" }, + { url = "https://files.pythonhosted.org/packages/23/ef/43d1c3ba205b5dec93dc97f3fba179dfa47910fc73aaaea4f7ceb41cec2a/multidict-6.7.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:0a13fb8e748dfc94749f622de065dd5c1def7e0d2216dba72b1d8069a389c6ff", size = 253345, upload-time = "2025-10-06T14:49:12.331Z" }, + { url = "https://files.pythonhosted.org/packages/6b/03/eaf95bcc2d19ead522001f6a650ef32811aa9e3624ff0ad37c445c7a588c/multidict-6.7.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e3aa16de190d29a0ea1b48253c57d99a68492c8dd8948638073ab9e74dc9410b", size = 246940, upload-time = "2025-10-06T14:49:13.821Z" }, + { url = "https://files.pythonhosted.org/packages/e8/df/ec8a5fd66ea6cd6f525b1fcbb23511b033c3e9bc42b81384834ffa484a62/multidict-6.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a048ce45dcdaaf1defb76b2e684f997fb5abf74437b6cb7b22ddad934a964e34", size = 242229, upload-time = "2025-10-06T14:49:15.603Z" }, + { url = "https://files.pythonhosted.org/packages/8a/a2/59b405d59fd39ec86d1142630e9049243015a5f5291ba49cadf3c090c541/multidict-6.7.0-cp311-cp311-win32.whl", hash = "sha256:a90af66facec4cebe4181b9e62a68be65e45ac9b52b67de9eec118701856e7ff", size = 41308, upload-time = "2025-10-06T14:49:16.871Z" }, + { url = "https://files.pythonhosted.org/packages/32/0f/13228f26f8b882c34da36efa776c3b7348455ec383bab4a66390e42963ae/multidict-6.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:95b5ffa4349df2887518bb839409bcf22caa72d82beec453216802f475b23c81", size = 46037, upload-time = "2025-10-06T14:49:18.457Z" }, + { url = "https://files.pythonhosted.org/packages/84/1f/68588e31b000535a3207fd3c909ebeec4fb36b52c442107499c18a896a2a/multidict-6.7.0-cp311-cp311-win_arm64.whl", hash = "sha256:329aa225b085b6f004a4955271a7ba9f1087e39dcb7e65f6284a988264a63912", size = 43023, upload-time = "2025-10-06T14:49:19.648Z" }, + { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, +] + +[[package]] +name = "narwhals" +version = "2.11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/a2/25208347aa4c2d82a265cf4bc0873aaf5069f525c0438146821e7fc19ef5/narwhals-2.11.0.tar.gz", hash = "sha256:d23f3ea7efc6b4d0355444a72de6b8fa3011175585246c3400c894a7583964af", size = 589233, upload-time = "2025-11-10T16:28:35.675Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c0/a1/4d21933898e23b011ae0528151b57a9230a62960d0919bf2ee48c7f5c20a/narwhals-2.11.0-py3-none-any.whl", hash = "sha256:a9795e1e44aa94e5ba6406ef1c5ee4c172414ced4f1aea4a79e5894f0c7378d4", size = 423069, upload-time = "2025-11-10T16:28:33.522Z" }, +] + +[[package]] +name = "natsort" +version = "8.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e2/a9/a0c57aee75f77794adaf35322f8b6404cbd0f89ad45c87197a937764b7d0/natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581", size = 76575, upload-time = "2023-06-20T04:17:19.925Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/82/7a9d0550484a62c6da82858ee9419f3dd1ccc9aa1c26a1e43da3ecd20b0d/natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c", size = 38268, upload-time = "2023-06-20T04:17:17.522Z" }, +] + +[[package]] +name = "nbclient" +version = "0.10.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "nbformat" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/87/66/7ffd18d58eae90d5721f9f39212327695b749e23ad44b3881744eaf4d9e8/nbclient-0.10.2.tar.gz", hash = "sha256:90b7fc6b810630db87a6d0c2250b1f0ab4cf4d3c27a299b0cde78a4ed3fd9193", size = 62424, upload-time = "2024-12-19T10:32:27.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/6d/e7fa07f03a4a7b221d94b4d586edb754a9b0dc3c9e2c93353e9fa4e0d117/nbclient-0.10.2-py3-none-any.whl", hash = "sha256:4ffee11e788b4a27fabeb7955547e4318a5298f34342a4bfd01f2e1faaeadc3d", size = 25434, upload-time = "2024-12-19T10:32:24.139Z" }, +] + +[[package]] +name = "nbconvert" +version = "7.16.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "bleach", extra = ["css"] }, + { name = "defusedxml" }, + { name = "jinja2" }, + { name = "jupyter-core" }, + { name = "jupyterlab-pygments" }, + { name = "markupsafe" }, + { name = "mistune" }, + { name = "nbclient" }, + { name = "nbformat" }, + { name = "packaging" }, + { name = "pandocfilters" }, + { name = "pygments" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/59/f28e15fc47ffb73af68a8d9b47367a8630d76e97ae85ad18271b9db96fdf/nbconvert-7.16.6.tar.gz", hash = "sha256:576a7e37c6480da7b8465eefa66c17844243816ce1ccc372633c6b71c3c0f582", size = 857715, upload-time = "2025-01-28T09:29:14.724Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/9a/cd673b2f773a12c992f41309ef81b99da1690426bd2f96957a7ade0d3ed7/nbconvert-7.16.6-py3-none-any.whl", hash = "sha256:1375a7b67e0c2883678c48e506dc320febb57685e5ee67faa51b18a90f3a712b", size = 258525, upload-time = "2025-01-28T09:29:12.551Z" }, +] + +[[package]] +name = "nbformat" +version = "5.10.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fastjsonschema" }, + { name = "jsonschema" }, + { name = "jupyter-core" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/fd/91545e604bc3dad7dca9ed03284086039b294c6b3d75c0d2fa45f9e9caf3/nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a", size = 142749, upload-time = "2024-04-04T11:20:37.371Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/a9/82/0340caa499416c78e5d8f5f05947ae4bc3cba53c9f038ab6e9ed964e22f1/nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b", size = 78454, upload-time = "2024-04-04T11:20:34.895Z" }, +] + +[[package]] +name = "ndindex" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/dc/a0/f584c0b6b998e4981201a1383200663a725f556f439cf58d02a093cb9f91/ndindex-1.10.0.tar.gz", hash = "sha256:20e3a2f0a8ed4646abf0f13296aab0b5b9cc8c5bc182b71b5945e76eb6f558bb", size = 258688, upload-time = "2025-05-21T17:42:22.718Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/1c/a53253d68bb269e5591c39b96ae2c4dd671132a82f63d70aea486f76d70c/ndindex-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2e42198c8636eaf468cf28b7e1700738de37841853f5f15a0671bad4c3876a85", size = 162556, upload-time = "2025-05-21T17:40:52.668Z" }, + { url = "https://files.pythonhosted.org/packages/0d/2a/4e268ff5992d4b42755ee19cf46c3e954632aadd57810db7173fe945ad47/ndindex-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ec9865e787eababc9aa1be973bf8545c044e2b68297fe37adf7aeefe0ec61f59", size = 161769, upload-time = "2025-05-21T17:40:54.55Z" }, + { url = "https://files.pythonhosted.org/packages/14/67/28ef988483e1ff446873150979b20fa87833c711fbe3a816e0e6a3e6e7d3/ndindex-1.10.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72377bc5d15229eeefa73a4370212d0bdb8992c76c2228df0771e0dcdeb5354a", size = 504542, upload-time = "2025-05-21T17:40:56.771Z" }, + { url = "https://files.pythonhosted.org/packages/79/d8/a4638485d17e5a236a7f8687a63229b4cc4737d018d8f8bdf18983419d5b/ndindex-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a8c9f85a1d6497a1fc3a8ac7faf64eef600f95d4330566ae7468e59b6da28d7", size = 528179, upload-time = "2025-05-21T17:40:58.859Z" }, + { url = "https://files.pythonhosted.org/packages/40/2a/a7c119db8332b85fa6886104ac388a771dd2b0ec35e4b2443d555c5e0e00/ndindex-1.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:560211699c4fa370c30edace212b4b61950934c3c9a7b3964f52f2dd09c6913a", size = 1642463, upload-time = "2025-05-21T17:41:01.234Z" }, + { url = "https://files.pythonhosted.org/packages/14/9a/41dd8270e9b0a411221c1c584fb088f0d43d750d596cf02e1f8b528c426d/ndindex-1.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:68e4ed3b5816d22cddf71478197c62ea2453a8f7dea0da57b52ce8b537c7a0c3", size = 1553373, upload-time = "2025-05-21T17:41:03.474Z" }, + { url = "https://files.pythonhosted.org/packages/6e/36/4d42edfc5f350b83801a473721927c4c01c210014bb2ea1a754e232871d3/ndindex-1.10.0-cp311-cp311-win32.whl", hash = "sha256:52adf006f99f21913300d93d8b08fdd9d12796ee2dc7a1737acd1beea5f7e7af", size = 148975, upload-time = "2025-05-21T17:41:05.65Z" }, + { url = "https://files.pythonhosted.org/packages/e9/b3/ec2b3447e49d69f033edb003761d3e2e01f2e5fe8ab397140099920405aa/ndindex-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:b90559638d35dd3c7f3f46dced6a306935866f86ba5cbd35190ef954334c33b9", size = 156723, upload-time = "2025-05-21T17:41:07.952Z" }, + { url = "https://files.pythonhosted.org/packages/c3/61/1333424bdfcebdcea63f5ed86ac98dccaf07ebb7e1463ca845a06e321d91/ndindex-1.10.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:aa17ea725f85af9285b298f72ccc8012949c0916d4426b0215d1c556dd995246", size = 146929, upload-time = 
"2025-05-21T17:42:08.04Z" }, + { url = "https://files.pythonhosted.org/packages/eb/7c/0813615d958ec78c521b9c09518b1f49ec553a0bec0646b5f4ebbf33bdcb/ndindex-1.10.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:219fdef9d6a557913fd92418275088b46c727944356f3fe59f4f72d62efd6f3d", size = 146417, upload-time = "2025-05-21T17:42:09.534Z" }, + { url = "https://files.pythonhosted.org/packages/d8/a1/b340a47409253f05c78d400f98b43477549ad1a1f7a5358acb784c79ed48/ndindex-1.10.0-pp311-pypy311_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1962137fcb69c00e2db42d5d045f9b7413fc37f44b143e7ae4a8c2c68ba3832", size = 163867, upload-time = "2025-05-21T17:42:10.994Z" }, + { url = "https://files.pythonhosted.org/packages/02/24/e5192ffb87070e9ff2328d715e5aa3a7f6b673e86c1ee8f48136815564e1/ndindex-1.10.0-pp311-pypy311_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18c9c8271926fb16c59e827b61bb77f45ee31a824eaa50b386edcd77a6a7c9a3", size = 160644, upload-time = "2025-05-21T17:42:12.415Z" }, + { url = "https://files.pythonhosted.org/packages/09/c5/b894cc961460e608b869d91164e9f825e3bb0579defb37c0eea61dce584e/ndindex-1.10.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:76e4fb082c83ccbc67c7a64b80e33bc5dfe9379f30c3b40a865914ae79947071", size = 147721, upload-time = "2025-05-21T17:42:13.825Z" }, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418, upload-time = "2024-01-21T14:25:19.227Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, +] + +[[package]] +name = "networkx" +version = "3.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/4f/ccdb8ad3a38e583f214547fd2f7ff1fc160c43a75af88e6aec213404b96a/networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037", size = 2471065, upload-time = "2025-05-29T11:35:07.804Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/8d/776adee7bbf76365fdd7f2552710282c79a4ead5d2a46408c9043a2b70ba/networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec", size = 2034406, upload-time = "2025-05-29T11:35:04.961Z" }, +] + +[[package]] +name = "notebook" +version = "7.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-server" }, + { name = "jupyterlab" }, + { name = "jupyterlab-server" }, + { name = "notebook-shim" }, + { name = "tornado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/a9/882707b0aa639e6d7d3e7df4bfbe07479d832e9a8f02d8471002a4ea6d65/notebook-7.5.1.tar.gz", hash = "sha256:b2fb4cef4d47d08c33aecce1c6c6e84be05436fbd791f88fce8df9fbca088b75", size = 14058696, upload-time = "2025-12-16T07:38:59.223Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/86/ca516cb58ad2cb2064124d31cf0fd8b012fca64bebeb26da2d2ddf03fc79/notebook-7.5.1-py3-none-any.whl", 
hash = "sha256:f4e2451c19910c33b88709b84537e11f6368c1cdff1aa0c43db701aea535dd44", size = 14468080, upload-time = "2025-12-16T07:38:55.644Z" }, +] + +[[package]] +name = "notebook-shim" +version = "0.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-server" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/54/d2/92fa3243712b9a3e8bafaf60aac366da1cada3639ca767ff4b5b3654ec28/notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb", size = 13167, upload-time = "2024-02-14T23:35:18.353Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/33/bd5b9137445ea4b680023eb0469b2bb969d61303dedb2aac6560ff3d14a1/notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef", size = 13307, upload-time = "2024-02-14T23:35:16.286Z" }, +] + +[[package]] +name = "numexpr" +version = "2.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cb/2f/fdba158c9dbe5caca9c3eca3eaffffb251f2fb8674bf8e2d0aed5f38d319/numexpr-2.14.1.tar.gz", hash = "sha256:4be00b1086c7b7a5c32e31558122b7b80243fe098579b170967da83f3152b48b", size = 119400, upload-time = "2025-10-13T16:17:27.351Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/a3/67999bdd1ed1f938d38f3fedd4969632f2f197b090e50505f7cc1fa82510/numexpr-2.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2d03fcb4644a12f70a14d74006f72662824da5b6128bf1bcd10cc3ed80e64c34", size = 163195, upload-time = "2025-10-13T16:16:31.212Z" }, + { url = "https://files.pythonhosted.org/packages/25/95/d64f680ea1fc56d165457287e0851d6708800f9fcea346fc1b9957942ee6/numexpr-2.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2773ee1133f77009a1fc2f34fe236f3d9823779f5f75450e183137d49f00499f", size = 152088, upload-time = "2025-10-13T16:16:33.186Z" }, + { url = "https://files.pythonhosted.org/packages/0e/7f/3bae417cb13ae08afd86d08bb0301c32440fe0cae4e6262b530e0819aeda/numexpr-2.14.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ebe4980f9494b9f94d10d2e526edc29e72516698d3bf95670ba79415492212a4", size = 451126, upload-time = "2025-10-13T16:13:22.248Z" }, + { url = "https://files.pythonhosted.org/packages/4c/1a/edbe839109518364ac0bd9e918cf874c755bb2c128040e920f198c494263/numexpr-2.14.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2a381e5e919a745c9503bcefffc1c7f98c972c04ec58fc8e999ed1a929e01ba6", size = 442012, upload-time = "2025-10-13T16:14:51.416Z" }, + { url = "https://files.pythonhosted.org/packages/66/b1/be4ce99bff769a5003baddac103f34681997b31d4640d5a75c0e8ed59c78/numexpr-2.14.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d08856cfc1b440eb1caaa60515235369654321995dd68eb9377577392020f6cb", size = 1415975, upload-time = "2025-10-13T16:13:26.088Z" }, + { url = "https://files.pythonhosted.org/packages/e7/33/b33b8fdc032a05d9ebb44a51bfcd4b92c178a2572cd3e6c1b03d8a4b45b2/numexpr-2.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03130afa04edf83a7b590d207444f05a00363c9b9ea5d81c0f53b1ea13fad55a", size = 1464683, upload-time = "2025-10-13T16:14:58.87Z" }, + { url = "https://files.pythonhosted.org/packages/d0/b2/ddcf0ac6cf0a1d605e5aecd4281507fd79a9628a67896795ab2e975de5df/numexpr-2.14.1-cp311-cp311-win32.whl", hash = "sha256:db78fa0c9fcbaded3ae7453faf060bd7a18b0dc10299d7fcd02d9362be1213ed", size = 166838, upload-time = 
"2025-10-13T16:17:06.765Z" }, + { url = "https://files.pythonhosted.org/packages/64/72/4ca9bd97b2eb6dce9f5e70a3b6acec1a93e1fb9b079cb4cba2cdfbbf295d/numexpr-2.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:e9b2f957798c67a2428be96b04bce85439bed05efe78eb78e4c2ca43737578e7", size = 160069, upload-time = "2025-10-13T16:17:08.752Z" }, +] + +[[package]] +name = "numpy" +version = "2.3.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/f4/098d2270d52b41f1bd7db9fc288aaa0400cb48c2a3e2af6fa365d9720947/numpy-2.3.4.tar.gz", hash = "sha256:a7d018bfedb375a8d979ac758b120ba846a7fe764911a64465fd87b8729f4a6a", size = 20582187, upload-time = "2025-10-15T16:18:11.77Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/e7/0e07379944aa8afb49a556a2b54587b828eb41dc9adc56fb7615b678ca53/numpy-2.3.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e78aecd2800b32e8347ce49316d3eaf04aed849cd5b38e0af39f829a4e59f5eb", size = 21259519, upload-time = "2025-10-15T16:15:19.012Z" }, + { url = "https://files.pythonhosted.org/packages/d0/cb/5a69293561e8819b09e34ed9e873b9a82b5f2ade23dce4c51dc507f6cfe1/numpy-2.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7fd09cc5d65bda1e79432859c40978010622112e9194e581e3415a3eccc7f43f", size = 14452796, upload-time = "2025-10-15T16:15:23.094Z" }, + { url = "https://files.pythonhosted.org/packages/e4/04/ff11611200acd602a1e5129e36cfd25bf01ad8e5cf927baf2e90236eb02e/numpy-2.3.4-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:1b219560ae2c1de48ead517d085bc2d05b9433f8e49d0955c82e8cd37bd7bf36", size = 5381639, upload-time = "2025-10-15T16:15:25.572Z" }, + { url = "https://files.pythonhosted.org/packages/ea/77/e95c757a6fe7a48d28a009267408e8aa382630cc1ad1db7451b3bc21dbb4/numpy-2.3.4-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:bafa7d87d4c99752d07815ed7a2c0964f8ab311eb8168f41b910bd01d15b6032", size = 6914296, upload-time = "2025-10-15T16:15:27.079Z" }, + { url = "https://files.pythonhosted.org/packages/a3/d2/137c7b6841c942124eae921279e5c41b1c34bab0e6fc60c7348e69afd165/numpy-2.3.4-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36dc13af226aeab72b7abad501d370d606326a0029b9f435eacb3b8c94b8a8b7", size = 14591904, upload-time = "2025-10-15T16:15:29.044Z" }, + { url = "https://files.pythonhosted.org/packages/bb/32/67e3b0f07b0aba57a078c4ab777a9e8e6bc62f24fb53a2337f75f9691699/numpy-2.3.4-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a7b2f9a18b5ff9824a6af80de4f37f4ec3c2aab05ef08f51c77a093f5b89adda", size = 16939602, upload-time = "2025-10-15T16:15:31.106Z" }, + { url = "https://files.pythonhosted.org/packages/95/22/9639c30e32c93c4cee3ccdb4b09c2d0fbff4dcd06d36b357da06146530fb/numpy-2.3.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9984bd645a8db6ca15d850ff996856d8762c51a2239225288f08f9050ca240a0", size = 16372661, upload-time = "2025-10-15T16:15:33.546Z" }, + { url = "https://files.pythonhosted.org/packages/12/e9/a685079529be2b0156ae0c11b13d6be647743095bb51d46589e95be88086/numpy-2.3.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:64c5825affc76942973a70acf438a8ab618dbd692b84cd5ec40a0a0509edc09a", size = 18884682, upload-time = "2025-10-15T16:15:36.105Z" }, + { url = "https://files.pythonhosted.org/packages/cf/85/f6f00d019b0cc741e64b4e00ce865a57b6bed945d1bbeb1ccadbc647959b/numpy-2.3.4-cp311-cp311-win32.whl", hash = "sha256:ed759bf7a70342f7817d88376eb7142fab9fef8320d6019ef87fae05a99874e1", size = 6570076, upload-time = 
"2025-10-15T16:15:38.225Z" }, + { url = "https://files.pythonhosted.org/packages/7d/10/f8850982021cb90e2ec31990291f9e830ce7d94eef432b15066e7cbe0bec/numpy-2.3.4-cp311-cp311-win_amd64.whl", hash = "sha256:faba246fb30ea2a526c2e9645f61612341de1a83fb1e0c5edf4ddda5a9c10996", size = 13089358, upload-time = "2025-10-15T16:15:40.404Z" }, + { url = "https://files.pythonhosted.org/packages/d1/ad/afdd8351385edf0b3445f9e24210a9c3971ef4de8fd85155462fc4321d79/numpy-2.3.4-cp311-cp311-win_arm64.whl", hash = "sha256:4c01835e718bcebe80394fd0ac66c07cbb90147ebbdad3dcecd3f25de2ae7e2c", size = 10462292, upload-time = "2025-10-15T16:15:42.896Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b6/64898f51a86ec88ca1257a59c1d7fd077b60082a119affefcdf1dd0df8ca/numpy-2.3.4-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:6e274603039f924c0fe5cb73438fa9246699c78a6df1bd3decef9ae592ae1c05", size = 21131552, upload-time = "2025-10-15T16:17:55.845Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4c/f135dc6ebe2b6a3c77f4e4838fa63d350f85c99462012306ada1bd4bc460/numpy-2.3.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d149aee5c72176d9ddbc6803aef9c0f6d2ceeea7626574fc68518da5476fa346", size = 14377796, upload-time = "2025-10-15T16:17:58.308Z" }, + { url = "https://files.pythonhosted.org/packages/d0/a4/f33f9c23fcc13dd8412fc8614559b5b797e0aba9d8e01dfa8bae10c84004/numpy-2.3.4-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:6d34ed9db9e6395bb6cd33286035f73a59b058169733a9db9f85e650b88df37e", size = 5306904, upload-time = "2025-10-15T16:18:00.596Z" }, + { url = "https://files.pythonhosted.org/packages/28/af/c44097f25f834360f9fb960fa082863e0bad14a42f36527b2a121abdec56/numpy-2.3.4-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:fdebe771ca06bb8d6abce84e51dca9f7921fe6ad34a0c914541b063e9a68928b", size = 6819682, upload-time = "2025-10-15T16:18:02.32Z" }, + { url = "https://files.pythonhosted.org/packages/c5/8c/cd283b54c3c2b77e188f63e23039844f56b23bba1712318288c13fe86baf/numpy-2.3.4-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:957e92defe6c08211eb77902253b14fe5b480ebc5112bc741fd5e9cd0608f847", size = 14422300, upload-time = "2025-10-15T16:18:04.271Z" }, + { url = "https://files.pythonhosted.org/packages/b0/f0/8404db5098d92446b3e3695cf41c6f0ecb703d701cb0b7566ee2177f2eee/numpy-2.3.4-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13b9062e4f5c7ee5c7e5be96f29ba71bc5a37fed3d1d77c37390ae00724d296d", size = 16760806, upload-time = "2025-10-15T16:18:06.668Z" }, + { url = "https://files.pythonhosted.org/packages/95/8e/2844c3959ce9a63acc7c8e50881133d86666f0420bcde695e115ced0920f/numpy-2.3.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:81b3a59793523e552c4a96109dde028aa4448ae06ccac5a76ff6532a85558a7f", size = 12973130, upload-time = "2025-10-15T16:18:09.397Z" }, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.8.4.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142", size = 594346921, upload-time = "2025-03-07T01:44:31.254Z" }, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182", size = 10248621, upload-time = "2025-03-07T01:40:21.213Z" }, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.8.93" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994", size = 88040029, upload-time = "2025-03-07T01:42:13.562Z" }, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90", size = 954765, upload-time = "2025-03-07T01:40:01.615Z" }, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "9.10.2.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8", size = 706758467, upload-time = "2025-06-06T21:54:08.597Z" }, +] + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.3.3.83" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74", size = 193118695, upload-time = "2025-03-07T01:45:27.821Z" }, +] + +[[package]] +name = "nvidia-cufile-cu12" +version = "1.13.1.3" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc", size = 1197834, upload-time = "2025-03-07T01:45:50.723Z" }, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.9.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9", size = 63619976, upload-time = 
"2025-03-07T01:46:23.323Z" }, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.7.3.90" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450", size = 267506905, upload-time = "2025-03-07T01:47:16.273Z" }, +] + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.5.8.93" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b", size = 288216466, upload-time = "2025-03-07T01:48:13.779Z" }, +] + +[[package]] +name = "nvidia-cusparselt-cu12" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623", size = 287193691, upload-time = "2025-02-26T00:15:44.104Z" }, +] + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.27.5" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/89/f7a07dc961b60645dbbf42e80f2bc85ade7feb9a491b11a1e973aa00071f/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ad730cf15cb5d25fe849c6e6ca9eb5b76db16a80f13f425ac68d8e2e55624457", size = 322348229, upload-time = "2025-06-26T04:11:28.385Z" }, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.8.93" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88", size = 39254836, upload-time = "2025-03-07T01:49:55.661Z" }, +] + +[[package]] +name = "nvidia-nvshmem-cu12" +version = "3.3.20" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/6c/99acb2f9eb85c29fc6f3a7ac4dccfd992e22666dd08a642b303311326a97/nvidia_nvshmem_cu12-3.3.20-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d00f26d3f9b2e3c3065be895e3059d6479ea5c638a3f38c9fec49b1b9dd7c1e5", size = 124657145, upload-time = 
"2025-08-04T20:25:19.995Z" }, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f", size = 89954, upload-time = "2025-03-07T01:42:44.131Z" }, +] + +[[package]] +name = "onnx" +version = "1.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ml-dtypes" }, + { name = "numpy" }, + { name = "protobuf" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bd/bf/824b13b7ea14c2d374b48a296cfa412442e5559326fbab5441a4fcb68924/onnx-1.20.0.tar.gz", hash = "sha256:1a93ec69996b4556062d552ed1aa0671978cfd3c17a40bf4c89a1ae169c6a4ad", size = 12049527, upload-time = "2025-12-01T18:14:34.679Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/9a/125ad5ed919d1782b26b0b4404e51adc44afd029be30d5a81b446dccd9c5/onnx-1.20.0-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:00dc8ae2c7b283f79623961f450b5515bd2c4b47a7027e7a1374ba49cef27768", size = 18341929, upload-time = "2025-12-01T18:13:43.79Z" }, + { url = "https://files.pythonhosted.org/packages/4d/3c/85280dd05396493f3e1b4feb7a3426715e344b36083229437f31d9788a01/onnx-1.20.0-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f62978ecfb8f320faba6704abd20253a5a79aacc4e5d39a9c061dd63d3b7574f", size = 17899362, upload-time = "2025-12-01T18:13:46.496Z" }, + { url = "https://files.pythonhosted.org/packages/26/db/e11cf9aaa6ccbcd27ea94d321020fef3207cba388bff96111e6431f97d1a/onnx-1.20.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:71177f8fd5c0dd90697bc281f5035f73707bdac83257a5c54d74403a1100ace9", size = 18119129, upload-time = "2025-12-01T18:13:49.662Z" }, + { url = "https://files.pythonhosted.org/packages/ef/0b/1b99e7ba5ccfa8ecb3509ec579c8520098d09b903ccd520026d60faa7c75/onnx-1.20.0-cp311-cp311-win32.whl", hash = "sha256:1d3d0308e2c194f4b782f51e78461b567fac8ce6871c0cf5452ede261683cc8f", size = 16364604, upload-time = "2025-12-01T18:13:52.691Z" }, + { url = "https://files.pythonhosted.org/packages/51/ab/7399817821d0d18ff67292ac183383e41f4f4ddff2047902f1b7b51d2d40/onnx-1.20.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a6de7dda77926c323b0e5a830dc9c2866ce350c1901229e193be1003a076c25", size = 16488019, upload-time = "2025-12-01T18:13:55.776Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e0/23059c11d9c0fb1951acec504a5cc86e1dd03d2eef3a98cf1941839f5322/onnx-1.20.0-cp311-cp311-win_arm64.whl", hash = "sha256:afc4cf83ce5d547ebfbb276dae8eb0ec836254a8698d462b4ba5f51e717fd1ae", size = 16446841, upload-time = "2025-12-01T18:13:58.091Z" }, + { url = "https://files.pythonhosted.org/packages/5e/19/2caa972a31014a8cb4525f715f2a75d93caef9d4b9da2809cc05d0489e43/onnx-1.20.0-cp312-abi3-macosx_12_0_universal2.whl", hash = "sha256:31efe37d7d1d659091f34ddd6a31780334acf7c624176832db9a0a8ececa8fb5", size = 18340913, upload-time = "2025-12-01T18:14:00.477Z" }, + { url = "https://files.pythonhosted.org/packages/78/bb/b98732309f2f6beb4cdcf7b955d7bbfd75a191185370ee21233373db381e/onnx-1.20.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d75da05e743eb9a11ff155a775cae5745e71f1cd0ca26402881b8f20e8d6e449", size = 17896118, upload-time = 
"2025-12-01T18:14:03.239Z" }, + { url = "https://files.pythonhosted.org/packages/84/a7/38aa564871d062c11538d65c575af9c7e057be880c09ecbd899dd1abfa83/onnx-1.20.0-cp312-abi3-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02e0d72ab09a983fce46686b155a5049898558d9f3bc6e8515120d6c40666318", size = 18115415, upload-time = "2025-12-01T18:14:06.261Z" }, + { url = "https://files.pythonhosted.org/packages/3b/17/a600b62cf4ad72976c66f83ce9e324205af434706ad5ec0e35129e125aef/onnx-1.20.0-cp312-abi3-win32.whl", hash = "sha256:392ca68b34b97e172d33b507e1e7bfdf2eea96603e6e7ff109895b82ff009dc7", size = 16363019, upload-time = "2025-12-01T18:14:09.16Z" }, + { url = "https://files.pythonhosted.org/packages/9c/3b/5146ba0a89f73c026bb468c49612bab8d005aa28155ebf06cf5f2eb8d36c/onnx-1.20.0-cp312-abi3-win_amd64.whl", hash = "sha256:259b05758d41645f5545c09f887187662b350d40db8d707c33c94a4f398e1733", size = 16485934, upload-time = "2025-12-01T18:14:13.046Z" }, + { url = "https://files.pythonhosted.org/packages/f3/bc/d251b97395e721b3034e9578d4d4d9fb33aac4197ae16ce8c7ed79a26dce/onnx-1.20.0-cp312-abi3-win_arm64.whl", hash = "sha256:2d25a9e1fde44bc69988e50e2211f62d6afcd01b0fd6dfd23429fd978a35d32f", size = 16444946, upload-time = "2025-12-01T18:14:15.801Z" }, +] + +[[package]] +name = "onnxruntime" +version = "1.23.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coloredlogs" }, + { name = "flatbuffers" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "protobuf" }, + { name = "sympy" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/be/467b00f09061572f022ffd17e49e49e5a7a789056bad95b54dfd3bee73ff/onnxruntime-1.23.2-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:6f91d2c9b0965e86827a5ba01531d5b669770b01775b23199565d6c1f136616c", size = 17196113, upload-time = "2025-10-22T03:47:33.526Z" }, + { url = "https://files.pythonhosted.org/packages/9f/a8/3c23a8f75f93122d2b3410bfb74d06d0f8da4ac663185f91866b03f7da1b/onnxruntime-1.23.2-cp311-cp311-macosx_13_0_x86_64.whl", hash = "sha256:87d8b6eaf0fbeb6835a60a4265fde7a3b60157cf1b2764773ac47237b4d48612", size = 19153857, upload-time = "2025-10-22T03:46:37.578Z" }, + { url = "https://files.pythonhosted.org/packages/3f/d8/506eed9af03d86f8db4880a4c47cd0dffee973ef7e4f4cff9f1d4bcf7d22/onnxruntime-1.23.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bbfd2fca76c855317568c1b36a885ddea2272c13cb0e395002c402f2360429a6", size = 15220095, upload-time = "2025-10-22T03:46:24.769Z" }, + { url = "https://files.pythonhosted.org/packages/e9/80/113381ba832d5e777accedc6cb41d10f9eca82321ae31ebb6bcede530cea/onnxruntime-1.23.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da44b99206e77734c5819aa2142c69e64f3b46edc3bd314f6a45a932defc0b3e", size = 17372080, upload-time = "2025-10-22T03:47:00.265Z" }, + { url = "https://files.pythonhosted.org/packages/3a/db/1b4a62e23183a0c3fe441782462c0ede9a2a65c6bbffb9582fab7c7a0d38/onnxruntime-1.23.2-cp311-cp311-win_amd64.whl", hash = "sha256:902c756d8b633ce0dedd889b7c08459433fbcf35e9c38d1c03ddc020f0648c6e", size = 13468349, upload-time = "2025-10-22T03:47:25.783Z" }, +] + +[[package]] +name = "open3d" +version = "0.19.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "addict" }, + { name = "configargparse" }, + { name = "dash" }, + { name = "flask" }, + { name = "matplotlib" }, + { name = "nbformat" }, + { name = "numpy" }, + { name = "pandas" }, + { name = "pillow" }, + { name = "pyquaternion" }, + { 
name = "pyyaml" }, + { name = "scikit-learn" }, + { name = "tqdm" }, + { name = "werkzeug" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/37/8d1746fcb58c37a9bd868fdca9a36c25b3c277bd764b7146419d11d2a58d/open3d-0.19.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:117702467bfb1602e9ae0ee5e2c7bcf573ebcd227b36a26f9f08425b52c89929", size = 103098641, upload-time = "2025-01-08T07:26:12.371Z" }, + { url = "https://files.pythonhosted.org/packages/bc/50/339bae21d0078cc3d3735e8eaf493a353a17dcc95d76bcefaa8edcf723d3/open3d-0.19.0-cp311-cp311-manylinux_2_31_x86_64.whl", hash = "sha256:678017392f6cc64a19d83afeb5329ffe8196893de2432f4c258eaaa819421bb5", size = 447683616, upload-time = "2025-01-08T07:22:48.098Z" }, + { url = "https://files.pythonhosted.org/packages/a3/3c/358f1cc5b034dc6a785408b7aa7643e503229d890bcbc830cda9fce778b1/open3d-0.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:02091c309708f09da1167d2ea475e05d19f5e81dff025145f3afd9373cbba61f", size = 69151111, upload-time = "2025-01-08T07:27:22.662Z" }, +] + +[[package]] +name = "opencv-python" +version = "4.11.0.86" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/06/68c27a523103dad5837dc5b87e71285280c4f098c60e4fe8a8db6486ab09/opencv-python-4.11.0.86.tar.gz", hash = "sha256:03d60ccae62304860d232272e4a4fda93c39d595780cb40b161b310244b736a4", size = 95171956, upload-time = "2025-01-16T13:52:24.737Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/4d/53b30a2a3ac1f75f65a59eb29cf2ee7207ce64867db47036ad61743d5a23/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:432f67c223f1dc2824f5e73cdfcd9db0efc8710647d4e813012195dc9122a52a", size = 37326322, upload-time = "2025-01-16T13:52:25.887Z" }, + { url = "https://files.pythonhosted.org/packages/3b/84/0a67490741867eacdfa37bc18df96e08a9d579583b419010d7f3da8ff503/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:9d05ef13d23fe97f575153558653e2d6e87103995d54e6a35db3f282fe1f9c66", size = 56723197, upload-time = "2025-01-16T13:55:21.222Z" }, + { url = "https://files.pythonhosted.org/packages/f3/bd/29c126788da65c1fb2b5fb621b7fed0ed5f9122aa22a0868c5e2c15c6d23/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b92ae2c8852208817e6776ba1ea0d6b1e0a1b5431e971a2a0ddd2a8cc398202", size = 42230439, upload-time = "2025-01-16T13:51:35.822Z" }, + { url = "https://files.pythonhosted.org/packages/2c/8b/90eb44a40476fa0e71e05a0283947cfd74a5d36121a11d926ad6f3193cc4/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b02611523803495003bd87362db3e1d2a0454a6a63025dc6658a9830570aa0d", size = 62986597, upload-time = "2025-01-16T13:52:08.836Z" }, + { url = "https://files.pythonhosted.org/packages/fb/d7/1d5941a9dde095468b288d989ff6539dd69cd429dbf1b9e839013d21b6f0/opencv_python-4.11.0.86-cp37-abi3-win32.whl", hash = "sha256:810549cb2a4aedaa84ad9a1c92fbfdfc14090e2749cedf2c1589ad8359aa169b", size = 29384337, upload-time = "2025-01-16T13:52:13.549Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/f1c30a92854540bf789e9cd5dde7ef49bbe63f855b85a2e6b3db8135c591/opencv_python-4.11.0.86-cp37-abi3-win_amd64.whl", hash = "sha256:085ad9b77c18853ea66283e98affefe2de8cc4c1f43eda4c100cf9b2721142ec", size = 39488044, upload-time = "2025-01-16T13:52:21.928Z" }, +] + +[[package]] +name = "overrides" +version = "7.7.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/36/86/b585f53236dec60aba864e050778b25045f857e17f6e5ea0ae95fe80edd2/overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a", size = 22812, upload-time = "2024-01-27T21:01:33.423Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/ab/fc8290c6a4c722e5514d80f62b2dc4c4df1a68a41d1364e625c35990fcf3/overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49", size = 17832, upload-time = "2024-01-27T21:01:31.393Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pandas" +version = "2.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "python-dateutil" }, + { name = "pytz" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/33/01/d40b85317f86cf08d853a4f495195c73815fdf205eef3993821720274518/pandas-2.3.3.tar.gz", hash = "sha256:e05e1af93b977f7eafa636d043f9f94c7ee3ac81af99c13508215942e64c993b", size = 4495223, upload-time = "2025-09-29T23:34:51.853Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/fa/7ac648108144a095b4fb6aa3de1954689f7af60a14cf25583f4960ecb878/pandas-2.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:602b8615ebcc4a0c1751e71840428ddebeb142ec02c786e8ad6b1ce3c8dec523", size = 11578790, upload-time = "2025-09-29T23:18:30.065Z" }, + { url = "https://files.pythonhosted.org/packages/9b/35/74442388c6cf008882d4d4bdfc4109be87e9b8b7ccd097ad1e7f006e2e95/pandas-2.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8fe25fc7b623b0ef6b5009149627e34d2a4657e880948ec3c840e9402e5c1b45", size = 10833831, upload-time = "2025-09-29T23:38:56.071Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e4/de154cbfeee13383ad58d23017da99390b91d73f8c11856f2095e813201b/pandas-2.3.3-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b468d3dad6ff947df92dcb32ede5b7bd41a9b3cceef0a30ed925f6d01fb8fa66", size = 12199267, upload-time = "2025-09-29T23:18:41.627Z" }, + { url = "https://files.pythonhosted.org/packages/bf/c9/63f8d545568d9ab91476b1818b4741f521646cbdd151c6efebf40d6de6f7/pandas-2.3.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b98560e98cb334799c0b07ca7967ac361a47326e9b4e5a7dfb5ab2b1c9d35a1b", size = 12789281, upload-time = "2025-09-29T23:18:56.834Z" }, + { url = "https://files.pythonhosted.org/packages/f2/00/a5ac8c7a0e67fd1a6059e40aa08fa1c52cc00709077d2300e210c3ce0322/pandas-2.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37b5848ba49824e5c30bedb9c830ab9b7751fd049bc7914533e01c65f79791", size = 13240453, upload-time = "2025-09-29T23:19:09.247Z" }, + { url = 
"https://files.pythonhosted.org/packages/27/4d/5c23a5bc7bd209231618dd9e606ce076272c9bc4f12023a70e03a86b4067/pandas-2.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:db4301b2d1f926ae677a751eb2bd0e8c5f5319c9cb3f88b0becbbb0b07b34151", size = 13890361, upload-time = "2025-09-29T23:19:25.342Z" }, + { url = "https://files.pythonhosted.org/packages/8e/59/712db1d7040520de7a4965df15b774348980e6df45c129b8c64d0dbe74ef/pandas-2.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:f086f6fe114e19d92014a1966f43a3e62285109afe874f067f5abbdcbb10e59c", size = 11348702, upload-time = "2025-09-29T23:19:38.296Z" }, +] + +[[package]] +name = "pandocfilters" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/70/6f/3dd4940bbe001c06a65f88e36bad298bc7a0de5036115639926b0c5c0458/pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e", size = 8454, upload-time = "2024-01-18T20:08:13.726Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/af/4fbc8cab944db5d21b7e2a5b8e9211a03a79852b1157e2c102fcc61ac440/pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc", size = 8663, upload-time = "2024-01-18T20:08:11.28Z" }, +] + +[[package]] +name = "parso" +version = "0.8.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d4/de/53e0bcf53d13e005bd8c92e7855142494f41171b34c2536b86187474184d/parso-0.8.5.tar.gz", hash = "sha256:034d7354a9a018bdce352f48b2a8a450f05e9d6ee85db84764e9b6bd96dafe5a", size = 401205, upload-time = "2025-08-23T15:15:28.028Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/16/32/f8e3c85d1d5250232a5d3477a2a28cc291968ff175caeadaf3cc19ce0e4a/parso-0.8.5-py2.py3-none-any.whl", hash = "sha256:646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887", size = 106668, upload-time = "2025-08-23T15:15:25.663Z" }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" }, +] + +[[package]] +name = "pillow" +version = "12.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/cace85a1b0c9775a9f8f5d5423c8261c858760e2466c79b2dd184638b056/pillow-12.0.0.tar.gz", hash = "sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353", size = 47008828, upload-time = "2025-10-15T18:24:14.008Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/5a/a2f6773b64edb921a756eb0729068acad9fc5208a53f4a349396e9436721/pillow-12.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0fd00cac9c03256c8b2ff58f162ebcd2587ad3e1f2e397eab718c47e24d231cc", size = 5289798, upload-time = "2025-10-15T18:21:47.763Z" }, + { url = 
"https://files.pythonhosted.org/packages/2e/05/069b1f8a2e4b5a37493da6c5868531c3f77b85e716ad7a590ef87d58730d/pillow-12.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3475b96f5908b3b16c47533daaa87380c491357d197564e0ba34ae75c0f3257", size = 4650589, upload-time = "2025-10-15T18:21:49.515Z" }, + { url = "https://files.pythonhosted.org/packages/61/e3/2c820d6e9a36432503ead175ae294f96861b07600a7156154a086ba7111a/pillow-12.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:110486b79f2d112cf6add83b28b627e369219388f64ef2f960fef9ebaf54c642", size = 6230472, upload-time = "2025-10-15T18:21:51.052Z" }, + { url = "https://files.pythonhosted.org/packages/4f/89/63427f51c64209c5e23d4d52071c8d0f21024d3a8a487737caaf614a5795/pillow-12.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5269cc1caeedb67e6f7269a42014f381f45e2e7cd42d834ede3c703a1d915fe3", size = 8033887, upload-time = "2025-10-15T18:21:52.604Z" }, + { url = "https://files.pythonhosted.org/packages/f6/1b/c9711318d4901093c15840f268ad649459cd81984c9ec9887756cca049a5/pillow-12.0.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa5129de4e174daccbc59d0a3b6d20eaf24417d59851c07ebb37aeb02947987c", size = 6343964, upload-time = "2025-10-15T18:21:54.619Z" }, + { url = "https://files.pythonhosted.org/packages/41/1e/db9470f2d030b4995083044cd8738cdd1bf773106819f6d8ba12597d5352/pillow-12.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bee2a6db3a7242ea309aa7ee8e2780726fed67ff4e5b40169f2c940e7eb09227", size = 7034756, upload-time = "2025-10-15T18:21:56.151Z" }, + { url = "https://files.pythonhosted.org/packages/cc/b0/6177a8bdd5ee4ed87cba2de5a3cc1db55ffbbec6176784ce5bb75aa96798/pillow-12.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:90387104ee8400a7b4598253b4c406f8958f59fcf983a6cea2b50d59f7d63d0b", size = 6458075, upload-time = "2025-10-15T18:21:57.759Z" }, + { url = "https://files.pythonhosted.org/packages/bc/5e/61537aa6fa977922c6a03253a0e727e6e4a72381a80d63ad8eec350684f2/pillow-12.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bc91a56697869546d1b8f0a3ff35224557ae7f881050e99f615e0119bf934b4e", size = 7125955, upload-time = "2025-10-15T18:21:59.372Z" }, + { url = "https://files.pythonhosted.org/packages/1f/3d/d5033539344ee3cbd9a4d69e12e63ca3a44a739eb2d4c8da350a3d38edd7/pillow-12.0.0-cp311-cp311-win32.whl", hash = "sha256:27f95b12453d165099c84f8a8bfdfd46b9e4bda9e0e4b65f0635430027f55739", size = 6298440, upload-time = "2025-10-15T18:22:00.982Z" }, + { url = "https://files.pythonhosted.org/packages/4d/42/aaca386de5cc8bd8a0254516957c1f265e3521c91515b16e286c662854c4/pillow-12.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:b583dc9070312190192631373c6c8ed277254aa6e6084b74bdd0a6d3b221608e", size = 6999256, upload-time = "2025-10-15T18:22:02.617Z" }, + { url = "https://files.pythonhosted.org/packages/ba/f1/9197c9c2d5708b785f631a6dfbfa8eb3fb9672837cb92ae9af812c13b4ed/pillow-12.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:759de84a33be3b178a64c8ba28ad5c135900359e85fb662bc6e403ad4407791d", size = 2436025, upload-time = "2025-10-15T18:22:04.598Z" }, + { url = "https://files.pythonhosted.org/packages/1d/b3/582327e6c9f86d037b63beebe981425d6811104cb443e8193824ef1a2f27/pillow-12.0.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b22bd8c974942477156be55a768f7aa37c46904c175be4e158b6a86e3a6b7ca8", size = 5215068, upload-time = "2025-10-15T18:23:59.594Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/d6/67748211d119f3b6540baf90f92fae73ae51d5217b171b0e8b5f7e5d558f/pillow-12.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:805ebf596939e48dbb2e4922a1d3852cfc25c38160751ce02da93058b48d252a", size = 4614994, upload-time = "2025-10-15T18:24:01.669Z" }, + { url = "https://files.pythonhosted.org/packages/2d/e1/f8281e5d844c41872b273b9f2c34a4bf64ca08905668c8ae730eedc7c9fa/pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cae81479f77420d217def5f54b5b9d279804d17e982e0f2fa19b1d1e14ab5197", size = 5246639, upload-time = "2025-10-15T18:24:03.403Z" }, + { url = "https://files.pythonhosted.org/packages/94/5a/0d8ab8ffe8a102ff5df60d0de5af309015163bf710c7bb3e8311dd3b3ad0/pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aeaefa96c768fc66818730b952a862235d68825c178f1b3ffd4efd7ad2edcb7c", size = 6986839, upload-time = "2025-10-15T18:24:05.344Z" }, + { url = "https://files.pythonhosted.org/packages/20/2e/3434380e8110b76cd9eb00a363c484b050f949b4bbe84ba770bb8508a02c/pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09f2d0abef9e4e2f349305a4f8cc784a8a6c2f58a8c4892eea13b10a943bd26e", size = 5313505, upload-time = "2025-10-15T18:24:07.137Z" }, + { url = "https://files.pythonhosted.org/packages/57/ca/5a9d38900d9d74785141d6580950fe705de68af735ff6e727cb911b64740/pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bdee52571a343d721fb2eb3b090a82d959ff37fc631e3f70422e0c2e029f3e76", size = 5963654, upload-time = "2025-10-15T18:24:09.579Z" }, + { url = "https://files.pythonhosted.org/packages/95/7e/f896623c3c635a90537ac093c6a618ebe1a90d87206e42309cb5d98a1b9e/pillow-12.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5", size = 6997850, upload-time = "2025-10-15T18:24:11.495Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/61/33/9611380c2bdb1225fdef633e2a9610622310fed35ab11dac9620972ee088/platformdirs-4.5.0.tar.gz", hash = "sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312", size = 21632, upload-time = "2025-10-08T17:44:48.791Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/cb/ac7874b3e5d58441674fb70742e6c374b28b0c7cb988d37d991cde47166c/platformdirs-4.5.0-py3-none-any.whl", hash = "sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3", size = 18651, upload-time = "2025-10-08T17:44:47.223Z" }, +] + +[[package]] +name = "plotly" +version = "6.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "narwhals" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/e6/b768650072837505804bed4790c5449ba348a3b720e27ca7605414e998cd/plotly-6.4.0.tar.gz", hash = "sha256:68c6db2ed2180289ef978f087841148b7efda687552276da15a6e9b92107052a", size = 7012379, upload-time = "2025-11-04T17:59:26.45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/ae/89b45ccccfeebc464c9233de5675990f75241b8ee4cd63227800fdf577d1/plotly-6.4.0-py3-none-any.whl", hash = "sha256:a1062eafbdc657976c2eedd276c90e184ccd6c21282a5e9ee8f20efca9c9a4c5", size = 9892458, upload-time = "2025-11-04T17:59:22.622Z" }, +] + +[[package]] +name = "pooch" +version = "1.8.2" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, + { name = "platformdirs" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/77/b3d3e00c696c16cf99af81ef7b1f5fe73bd2a307abca41bd7605429fe6e5/pooch-1.8.2.tar.gz", hash = "sha256:76561f0de68a01da4df6af38e9955c4c9d1a5c90da73f7e40276a5728ec83d10", size = 59353, upload-time = "2024-06-06T16:53:46.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/87/77cc11c7a9ea9fd05503def69e3d18605852cd0d4b0d3b8f15bbeb3ef1d1/pooch-1.8.2-py3-none-any.whl", hash = "sha256:3529a57096f7198778a5ceefd5ac3ef0e4d06a6ddaf9fc2d609b806f25302c47", size = 64574, upload-time = "2024-06-06T16:53:44.343Z" }, +] + +[[package]] +name = "prometheus-client" +version = "0.23.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/23/53/3edb5d68ecf6b38fcbcc1ad28391117d2a322d9a1a3eff04bfdb184d8c3b/prometheus_client-0.23.1.tar.gz", hash = "sha256:6ae8f9081eaaaf153a2e959d2e6c4f4fb57b12ef76c8c7980202f1e57b48b2ce", size = 80481, upload-time = "2025-09-18T20:47:25.043Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b8/db/14bafcb4af2139e046d03fd00dea7873e48eafe18b7d2797e73d6681f210/prometheus_client-0.23.1-py3-none-any.whl", hash = "sha256:dd1913e6e76b59cfe44e7a4b83e01afc9873c1bdfd2ed8739f1e76aeca115f99", size = 61145, upload-time = "2025-09-18T20:47:23.875Z" }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.52" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" }, +] + +[[package]] +name = "propcache" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/da/e9fc233cf63743258bff22b3dfa7ea5baef7b5bc324af47a0ad89b8ffc6f/propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d", size = 46442, upload-time = "2025-10-08T19:49:02.291Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/d4/4e2c9aaf7ac2242b9358f98dccd8f90f2605402f5afeff6c578682c2c491/propcache-0.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:60a8fda9644b7dfd5dece8c61d8a85e271cb958075bfc4e01083c148b61a7caf", size = 80208, upload-time = "2025-10-08T19:46:24.597Z" }, + { url = "https://files.pythonhosted.org/packages/c2/21/d7b68e911f9c8e18e4ae43bdbc1e1e9bbd971f8866eb81608947b6f585ff/propcache-0.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c30b53e7e6bda1d547cabb47c825f3843a0a1a42b0496087bb58d8fedf9f41b5", size = 45777, upload-time = "2025-10-08T19:46:25.733Z" }, + { url = "https://files.pythonhosted.org/packages/d3/1d/11605e99ac8ea9435651ee71ab4cb4bf03f0949586246476a25aadfec54a/propcache-0.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6918ecbd897443087a3b7cd978d56546a812517dcaaca51b49526720571fa93e", size = 47647, upload-time = "2025-10-08T19:46:27.304Z" 
}, + { url = "https://files.pythonhosted.org/packages/58/1a/3c62c127a8466c9c843bccb503d40a273e5cc69838805f322e2826509e0d/propcache-0.4.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3d902a36df4e5989763425a8ab9e98cd8ad5c52c823b34ee7ef307fd50582566", size = 214929, upload-time = "2025-10-08T19:46:28.62Z" }, + { url = "https://files.pythonhosted.org/packages/56/b9/8fa98f850960b367c4b8fe0592e7fc341daa7a9462e925228f10a60cf74f/propcache-0.4.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a9695397f85973bb40427dedddf70d8dc4a44b22f1650dd4af9eedf443d45165", size = 221778, upload-time = "2025-10-08T19:46:30.358Z" }, + { url = "https://files.pythonhosted.org/packages/46/a6/0ab4f660eb59649d14b3d3d65c439421cf2f87fe5dd68591cbe3c1e78a89/propcache-0.4.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2bb07ffd7eaad486576430c89f9b215f9e4be68c4866a96e97db9e97fead85dc", size = 228144, upload-time = "2025-10-08T19:46:32.607Z" }, + { url = "https://files.pythonhosted.org/packages/52/6a/57f43e054fb3d3a56ac9fc532bc684fc6169a26c75c353e65425b3e56eef/propcache-0.4.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd6f30fdcf9ae2a70abd34da54f18da086160e4d7d9251f81f3da0ff84fc5a48", size = 210030, upload-time = "2025-10-08T19:46:33.969Z" }, + { url = "https://files.pythonhosted.org/packages/40/e2/27e6feebb5f6b8408fa29f5efbb765cd54c153ac77314d27e457a3e993b7/propcache-0.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fc38cba02d1acba4e2869eef1a57a43dfbd3d49a59bf90dda7444ec2be6a5570", size = 208252, upload-time = "2025-10-08T19:46:35.309Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f8/91c27b22ccda1dbc7967f921c42825564fa5336a01ecd72eb78a9f4f53c2/propcache-0.4.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:67fad6162281e80e882fb3ec355398cf72864a54069d060321f6cd0ade95fe85", size = 202064, upload-time = "2025-10-08T19:46:36.993Z" }, + { url = "https://files.pythonhosted.org/packages/f2/26/7f00bd6bd1adba5aafe5f4a66390f243acab58eab24ff1a08bebb2ef9d40/propcache-0.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f10207adf04d08bec185bae14d9606a1444715bc99180f9331c9c02093e1959e", size = 212429, upload-time = "2025-10-08T19:46:38.398Z" }, + { url = "https://files.pythonhosted.org/packages/84/89/fd108ba7815c1117ddca79c228f3f8a15fc82a73bca8b142eb5de13b2785/propcache-0.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e9b0d8d0845bbc4cfcdcbcdbf5086886bc8157aa963c31c777ceff7846c77757", size = 216727, upload-time = "2025-10-08T19:46:39.732Z" }, + { url = "https://files.pythonhosted.org/packages/79/37/3ec3f7e3173e73f1d600495d8b545b53802cbf35506e5732dd8578db3724/propcache-0.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:981333cb2f4c1896a12f4ab92a9cc8f09ea664e9b7dbdc4eff74627af3a11c0f", size = 205097, upload-time = "2025-10-08T19:46:41.025Z" }, + { url = "https://files.pythonhosted.org/packages/61/b0/b2631c19793f869d35f47d5a3a56fb19e9160d3c119f15ac7344fc3ccae7/propcache-0.4.1-cp311-cp311-win32.whl", hash = "sha256:f1d2f90aeec838a52f1c1a32fe9a619fefd5e411721a9117fbf82aea638fe8a1", size = 38084, upload-time = "2025-10-08T19:46:42.693Z" }, + { url = "https://files.pythonhosted.org/packages/f4/78/6cce448e2098e9f3bfc91bb877f06aa24b6ccace872e39c53b2f707c4648/propcache-0.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:364426a62660f3f699949ac8c621aad6977be7126c5807ce48c0aeb8e7333ea6", size 
= 41637, upload-time = "2025-10-08T19:46:43.778Z" }, + { url = "https://files.pythonhosted.org/packages/9c/e9/754f180cccd7f51a39913782c74717c581b9cc8177ad0e949f4d51812383/propcache-0.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:e53f3a38d3510c11953f3e6a33f205c6d1b001129f972805ca9b42fc308bc239", size = 38064, upload-time = "2025-10-08T19:46:44.872Z" }, + { url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" }, +] + +[[package]] +name = "protobuf" +version = "6.33.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/34/44/e49ecff446afeec9d1a66d6bbf9adc21e3c7cea7803a920ca3773379d4f6/protobuf-6.33.2.tar.gz", hash = "sha256:56dc370c91fbb8ac85bc13582c9e373569668a290aa2e66a590c2a0d35ddb9e4", size = 444296, upload-time = "2025-12-06T00:17:53.311Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/91/1e3a34881a88697a7354ffd177e8746e97a722e5e8db101544b47e84afb1/protobuf-6.33.2-cp310-abi3-win32.whl", hash = "sha256:87eb388bd2d0f78febd8f4c8779c79247b26a5befad525008e49a6955787ff3d", size = 425603, upload-time = "2025-12-06T00:17:41.114Z" }, + { url = "https://files.pythonhosted.org/packages/64/20/4d50191997e917ae13ad0a235c8b42d8c1ab9c3e6fd455ca16d416944355/protobuf-6.33.2-cp310-abi3-win_amd64.whl", hash = "sha256:fc2a0e8b05b180e5fc0dd1559fe8ebdae21a27e81ac77728fb6c42b12c7419b4", size = 436930, upload-time = "2025-12-06T00:17:43.278Z" }, + { url = "https://files.pythonhosted.org/packages/b2/ca/7e485da88ba45c920fb3f50ae78de29ab925d9e54ef0de678306abfbb497/protobuf-6.33.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d9b19771ca75935b3a4422957bc518b0cecb978b31d1dd12037b088f6bcc0e43", size = 427621, upload-time = "2025-12-06T00:17:44.445Z" }, + { url = "https://files.pythonhosted.org/packages/7d/4f/f743761e41d3b2b2566748eb76bbff2b43e14d5fcab694f494a16458b05f/protobuf-6.33.2-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:b5d3b5625192214066d99b2b605f5783483575656784de223f00a8d00754fc0e", size = 324460, upload-time = "2025-12-06T00:17:45.678Z" }, + { url = "https://files.pythonhosted.org/packages/b1/fa/26468d00a92824020f6f2090d827078c09c9c587e34cbfd2d0c7911221f8/protobuf-6.33.2-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8cd7640aee0b7828b6d03ae518b5b4806fdfc1afe8de82f79c3454f8aef29872", size = 339168, upload-time = "2025-12-06T00:17:46.813Z" }, + { url = "https://files.pythonhosted.org/packages/56/13/333b8f421738f149d4fe5e49553bc2a2ab75235486259f689b4b91f96cec/protobuf-6.33.2-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:1f8017c48c07ec5859106533b682260ba3d7c5567b1ca1f24297ce03384d1b4f", size = 323270, upload-time = "2025-12-06T00:17:48.253Z" }, + { url = "https://files.pythonhosted.org/packages/0e/15/4f02896cc3df04fc465010a4c6a0cd89810f54617a32a70ef531ed75d61c/protobuf-6.33.2-py3-none-any.whl", hash = "sha256:7636aad9bb01768870266de5dc009de2d1b936771b38a793f73cbbf279c91c5c", size = 170501, upload-time = "2025-12-06T00:17:52.211Z" }, +] + +[[package]] +name = "psutil" +version = "7.1.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e1/88/bdd0a41e5857d5d703287598cbf08dad90aed56774ea52ae071bae9071b6/psutil-7.1.3.tar.gz", hash = "sha256:6c86281738d77335af7aec228328e944b30930899ea760ecf33a4dba66be5e74", size = 489059, 
upload-time = "2025-11-02T12:25:54.619Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/94/46b9154a800253e7ecff5aaacdf8ebf43db99de4a2dfa18575b02548654e/psutil-7.1.3-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2bdbcd0e58ca14996a42adf3621a6244f1bb2e2e528886959c72cf1e326677ab", size = 238359, upload-time = "2025-11-02T12:26:25.284Z" }, + { url = "https://files.pythonhosted.org/packages/68/3a/9f93cff5c025029a36d9a92fef47220ab4692ee7f2be0fba9f92813d0cb8/psutil-7.1.3-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31fa00f1fbc3c3802141eede66f3a2d51d89716a194bf2cd6fc68310a19880", size = 239171, upload-time = "2025-11-02T12:26:27.23Z" }, + { url = "https://files.pythonhosted.org/packages/ce/b1/5f49af514f76431ba4eea935b8ad3725cdeb397e9245ab919dbc1d1dc20f/psutil-7.1.3-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bb428f9f05c1225a558f53e30ccbad9930b11c3fc206836242de1091d3e7dd3", size = 263261, upload-time = "2025-11-02T12:26:29.48Z" }, + { url = "https://files.pythonhosted.org/packages/e0/95/992c8816a74016eb095e73585d747e0a8ea21a061ed3689474fabb29a395/psutil-7.1.3-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56d974e02ca2c8eb4812c3f76c30e28836fffc311d55d979f1465c1feeb2b68b", size = 264635, upload-time = "2025-11-02T12:26:31.74Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/c3ed1a622b6ae2fd3c945a366e64eb35247a31e4db16cf5095e269e8eb3c/psutil-7.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:f39c2c19fe824b47484b96f9692932248a54c43799a84282cfe58d05a6449efd", size = 247633, upload-time = "2025-11-02T12:26:33.887Z" }, + { url = "https://files.pythonhosted.org/packages/c9/ad/33b2ccec09bf96c2b2ef3f9a6f66baac8253d7565d8839e024a6b905d45d/psutil-7.1.3-cp37-abi3-win_arm64.whl", hash = "sha256:bd0d69cee829226a761e92f28140bec9a5ee9d5b4fb4b0cc589068dbfff559b1", size = 244608, upload-time = "2025-11-02T12:26:36.136Z" }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, +] + +[[package]] +name = "pvapy" +version = "5.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/0f/05/bfd3146703cad7fada55912815e559639ee2a6c22e7b9d8b6abfbe14df92/pvapy-5.6.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:17e16942f26b82b0db177650402d834605041f6355ea25c141e0baa7ff82766a", size = 14264604, upload-time = "2025-08-11T17:55:28.757Z" }, + { url = "https://files.pythonhosted.org/packages/29/20/4b06d34b2a6164e4d20e7f082c6196a8cb4b6a82dd66c4bb2e095279b4ff/pvapy-5.6.0-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:a5da43a040938fa23b7fc015c0a79e801a9284971c799a02f8d88393bc003c61", size = 8791812, upload-time = "2025-08-14T20:29:31.135Z" }, + { url = "https://files.pythonhosted.org/packages/79/56/0401eed2ec3abf112f026c547846fcc562782e7f5cd5e428eb0fadb70ddc/pvapy-5.6.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c390f79bff5962925ad726d63d1e814100e0f3a2583ea8f9af34f8efcf63bb69", size = 14377319, upload-time = "2025-08-11T19:11:44.791Z" }, + { url = "https://files.pythonhosted.org/packages/7e/ed/c17d56880e4d45faffbfa4c31e61b51ce06d4ead339330b20950ab90ad2a/pvapy-5.6.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cfc63447eb961b635569c2a606ec51f5360876be8a785270223b77397874f4c5", size = 14676361, upload-time = "2025-08-08T22:03:08.028Z" }, + { url = "https://files.pythonhosted.org/packages/b7/3b/c595b5d84401861ee07aac00b607581fec31e22f755b8aa148551423501b/pvapy-5.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:6eee3ed411cb0065526a2894a243b77ddd145e06dd1cdaefa663c0cbc819fdf9", size = 2844607, upload-time = "2025-08-13T01:00:37.001Z" }, +] + +[[package]] +name = "py-cpuinfo" +version = "9.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/37/a8/d832f7293ebb21690860d2e01d8115e5ff6f2ae8bbdc953f0eb0fa4bd2c7/py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690", size = 104716, upload-time = "2022-10-25T20:38:06.303Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/a9/023730ba63db1e494a271cb018dcd361bd2c917ba7004c3e49d5daf795a2/py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5", size = 22335, upload-time = "2022-10-25T20:38:27.636Z" }, +] + +[[package]] +name = "pycocotools" +version = "2.0.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a2/df/32354b5dda963ffdfc8f75c9acf8828ef7890723a4ed57bb3ff2dc1d6f7e/pycocotools-2.0.11.tar.gz", hash = "sha256:34254d76da85576fcaf5c1f3aa9aae16b8cb15418334ba4283b800796bd1993d", size = 25381, upload-time = "2025-12-15T22:31:46.148Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/3f/41ce3fce61b7721158f21b61727eb054805babc0088cfa48506935b80a36/pycocotools-2.0.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:81bdceebb4c64e9265213e2d733808a12f9c18dfb14457323cc6b9af07fa0e61", size = 158947, upload-time = "2025-12-15T22:31:03.291Z" }, + { url = "https://files.pythonhosted.org/packages/e2/9b/a739705b246445bd1376394bf9d1ec2dd292b16740e92f203461b2bb12ed/pycocotools-2.0.11-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a1c05f91ccc658dfe01325267209c4b435da1722c93eeb5749fabc1d087b6882", size = 485174, upload-time = "2025-12-15T22:31:04.395Z" }, + { url = 
"https://files.pythonhosted.org/packages/34/70/7a12752784e57d8034a76c245c618a2f88a9d2463862b990f314aea7e5d6/pycocotools-2.0.11-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18ba75ff58cedb33a85ce2c18f1452f1fe20c9dd59925eec5300b2bf6205dbe1", size = 493172, upload-time = "2025-12-15T22:31:05.504Z" }, + { url = "https://files.pythonhosted.org/packages/5c/fc/d703599ac728209dba08aea8d4bee884d5adabfcd9041abed1658d863747/pycocotools-2.0.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:693417797f0377fd094eb815c0a1e7d1c3c0251b71e3b3779fce3b3cf24793c5", size = 480506, upload-time = "2025-12-15T22:31:06.77Z" }, + { url = "https://files.pythonhosted.org/packages/81/d9/e1cfc320bbb2cd58c3b4398c3821cbe75d93c16ed3135ac9e774a18a02d3/pycocotools-2.0.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b6a07071c441d0f5e480a8f287106191582e40289d4e242dfe684e0c8a751088", size = 497595, upload-time = "2025-12-15T22:31:08.277Z" }, + { url = "https://files.pythonhosted.org/packages/a2/23/d17f6111c2a6ae8631d4fa90202bea05844da715d61431fbc34d276462d5/pycocotools-2.0.11-cp311-cp311-win_amd64.whl", hash = "sha256:8e159232adae3aef6b4e2d37b008bff107b26e9ed3b48e70ea6482302834bd34", size = 80519, upload-time = "2025-12-15T22:31:09.613Z" }, + { url = "https://files.pythonhosted.org/packages/00/4c/76b00b31a724c3f5ccdab0f85e578afb2ca38d33be0a0e98f1770cafd958/pycocotools-2.0.11-cp311-cp311-win_arm64.whl", hash = "sha256:4fc9889e819452b9c142036e1eabac8a13a8bd552d8beba299a57e0da6bfa1ec", size = 69304, upload-time = "2025-12-15T22:31:10.592Z" }, + { url = "https://files.pythonhosted.org/packages/87/12/2f2292332456e4e4aba1dec0e3de8f1fc40fb2f4fdb0ca1cb17db9861682/pycocotools-2.0.11-cp312-abi3-macosx_10_13_universal2.whl", hash = "sha256:a2e9634bc7cadfb01c88e0b98589aaf0bd12983c7927bde93f19c0103e5441f4", size = 147795, upload-time = "2025-12-15T22:31:11.519Z" }, + { url = "https://files.pythonhosted.org/packages/63/3c/68d7ea376aada9046e7ea2d7d0dad0d27e1ae8b4b3c26a28346689390ab2/pycocotools-2.0.11-cp312-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7fd4121766cc057133534679c0ec3f9023dbd96e9b31cf95c86a069ebdac2b65", size = 398434, upload-time = "2025-12-15T22:31:12.558Z" }, + { url = "https://files.pythonhosted.org/packages/23/59/dc81895beff4e1207a829d40d442ea87cefaac9f6499151965f05c479619/pycocotools-2.0.11-cp312-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a82d1c9ed83f75da0b3f244f2a3cf559351a283307bd9b79a4ee2b93ab3231dd", size = 411685, upload-time = "2025-12-15T22:31:13.995Z" }, + { url = "https://files.pythonhosted.org/packages/0b/0b/5a8a7de300862a2eb5e2ecd3cb015126231379206cd3ebba8f025388d770/pycocotools-2.0.11-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:89e853425018e2c2920ee0f2112cf7c140a1dcf5f4f49abd9c2da112c3e0f4b3", size = 390500, upload-time = "2025-12-15T22:31:15.138Z" }, + { url = "https://files.pythonhosted.org/packages/63/b5/519bb68647f06feea03d5f355c33c05800aeae4e57b9482b2859eb00752e/pycocotools-2.0.11-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:87af87b8d06d5b852a885a319d9362dca3bed9f8bbcc3feb6513acb1f88ea242", size = 409790, upload-time = "2025-12-15T22:31:16.326Z" }, + { url = "https://files.pythonhosted.org/packages/83/b4/f6708404ff494706b80e714b919f76dc4ec9845a4007affd6d6b0843f928/pycocotools-2.0.11-cp312-abi3-win_amd64.whl", hash = "sha256:ffe806ce535f5996445188f9a35643791dc54beabc61bd81e2b03367356d604f", size = 77570, upload-time = 
"2025-12-15T22:31:17.703Z" }, + { url = "https://files.pythonhosted.org/packages/6e/63/778cd0ddc9d4a78915ac0a72b56d7fb204f7c3fabdad067d67ea0089762e/pycocotools-2.0.11-cp312-abi3-win_arm64.whl", hash = "sha256:c230f5e7b14bd19085217b4f40bba81bf14a182b150b8e9fab1c15d504ade343", size = 64564, upload-time = "2025-12-15T22:31:18.652Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pyepics" +version = "3.5.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pyparsing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6e/56/6699256e0e2a4f42400906dffea5223d165762d50b305641d56f4a5825ce/pyepics-3.5.8.tar.gz", hash = "sha256:d44e6ac9404b5a827a5224cde374387b47f6f3f891c8437ddbd2f9fb913bba51", size = 6148950, upload-time = "2025-06-12T16:34:30.018Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/6e/10a8bbefd158d303b88d1075e24370b61c62c48d2d6ed38fa890fd1ec860/pyepics-3.5.8-py3-none-any.whl", hash = "sha256:02f322284f558feea16f8d4efee3d102e27c4f7c25cbfdafcc28eec944110a44", size = 5332328, upload-time = "2025-06-12T16:34:28.256Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyparsing" +version = "3.2.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/a5/181488fc2b9d093e3972d2a472855aae8a03f000592dbfce716a512b3359/pyparsing-3.2.5.tar.gz", hash = "sha256:2df8d5b7b2802ef88e8d016a2eb9c7aeaa923529cd251ed0fe4608275d4105b6", size = 1099274, upload-time = "2025-09-21T04:11:06.277Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/10/5e/1aa9a93198c6b64513c9d7752de7422c06402de6600a8767da1524f9570b/pyparsing-3.2.5-py3-none-any.whl", hash = "sha256:e38a4f02064cf41fe6593d328d0512495ad1f3d8a91c4f73fc401b3079a59a5e", size = 113890, upload-time = "2025-09-21T04:11:04.117Z" }, +] + +[[package]] +name = "pyqt5" +version = "5.15.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyqt5-qt5" }, + { name = "pyqt5-sip" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/07/c9ed0bd428df6f87183fca565a79fee19fa7c88c7f00a7f011ab4379e77a/PyQt5-5.15.11.tar.gz", hash = 
"sha256:fda45743ebb4a27b4b1a51c6d8ef455c4c1b5d610c90d2934c7802b5c1557c52", size = 3216775, upload-time = "2024-07-19T08:39:57.756Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/64/42ec1b0bd72d87f87bde6ceb6869f444d91a2d601f2e67cd05febc0346a1/PyQt5-5.15.11-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c8b03dd9380bb13c804f0bdb0f4956067f281785b5e12303d529f0462f9afdc2", size = 6579776, upload-time = "2024-07-19T08:39:19.775Z" }, + { url = "https://files.pythonhosted.org/packages/49/f5/3fb696f4683ea45d68b7e77302eff173493ac81e43d63adb60fa760b9f91/PyQt5-5.15.11-cp38-abi3-macosx_11_0_x86_64.whl", hash = "sha256:6cd75628f6e732b1ffcfe709ab833a0716c0445d7aec8046a48d5843352becb6", size = 7016415, upload-time = "2024-07-19T08:39:32.977Z" }, + { url = "https://files.pythonhosted.org/packages/b4/8c/4065950f9d013c4b2e588fe33cf04e564c2322842d84dbcbce5ba1dc28b0/PyQt5-5.15.11-cp38-abi3-manylinux_2_17_x86_64.whl", hash = "sha256:cd672a6738d1ae33ef7d9efa8e6cb0a1525ecf53ec86da80a9e1b6ec38c8d0f1", size = 8188103, upload-time = "2024-07-19T08:39:40.561Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f0/ae5a5b4f9b826b29ea4be841b2f2d951bcf5ae1d802f3732b145b57c5355/PyQt5-5.15.11-cp38-abi3-win32.whl", hash = "sha256:76be0322ceda5deecd1708a8d628e698089a1cea80d1a49d242a6d579a40babd", size = 5433308, upload-time = "2024-07-19T08:39:46.932Z" }, + { url = "https://files.pythonhosted.org/packages/56/d5/68eb9f3d19ce65df01b6c7b7a577ad3bbc9ab3a5dd3491a4756e71838ec9/PyQt5-5.15.11-cp38-abi3-win_amd64.whl", hash = "sha256:bdde598a3bb95022131a5c9ea62e0a96bd6fb28932cc1619fd7ba211531b7517", size = 6865864, upload-time = "2024-07-19T08:39:53.572Z" }, +] + +[[package]] +name = "pyqt5-qt5" +version = "5.15.18" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/90/bf01ac2132400997a3474051dd680a583381ebf98b2f5d64d4e54138dc42/pyqt5_qt5-5.15.18-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:8bb997eb903afa9da3221a0c9e6eaa00413bbeb4394d5706118ad05375684767", size = 39715743, upload-time = "2025-11-09T12:56:42.936Z" }, + { url = "https://files.pythonhosted.org/packages/24/8e/76366484d9f9dbe28e3bdfc688183433a7b82e314216e9b14c89e5fab690/pyqt5_qt5-5.15.18-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c656af9c1e6aaa7f59bf3d8995f2fa09adbf6762b470ed284c31dca80d686a26", size = 36798484, upload-time = "2025-11-09T12:56:59.998Z" }, + { url = "https://files.pythonhosted.org/packages/9a/46/ffe177f99f897a59dc237a20059020427bd2d3853d713992b8081933ddfe/pyqt5_qt5-5.15.18-py3-none-manylinux2014_x86_64.whl", hash = "sha256:bf2457e6371969736b4f660a0c153258fa03dbc6a181348218e6f05421682af7", size = 60864590, upload-time = "2025-11-09T12:57:26.724Z" }, +] + +[[package]] +name = "pyqt5-sip" +version = "12.17.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ea/08/88a20c862f40b5c178c517cdc7e93767967dec5ac1b994e226d517991c9b/pyqt5_sip-12.17.1.tar.gz", hash = "sha256:0eab72bcb628f1926bf5b9ac51259d4fa18e8b2a81d199071135458f7d087ea8", size = 104136, upload-time = "2025-10-08T09:04:19.893Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/e4/451e465c75584a7cbd10e10404317b7443af83f56a64e02080b1f3cda5b5/pyqt5_sip-12.17.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5134d637efadd108a70306bab55b3d7feaa951bf6b8162161a67ae847bea9130", size = 122581, upload-time = "2025-10-08T09:04:13.607Z" }, + { url = 
"https://files.pythonhosted.org/packages/dc/b2/330f97434b21fbc99ab16f6ce71358ff5ea1bf1f09ed14dfe6b28b5ed8f5/pyqt5_sip-12.17.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:155cf755266c8bf64428916e2ff720d5efa1aec003d4ccc40c003b147dbdac03", size = 276844, upload-time = "2025-10-08T09:15:33.713Z" }, + { url = "https://files.pythonhosted.org/packages/3b/fd/53925099d0fc8aaf7adee613b6cebfb3fdfcd1238add64ff9edf6711e5f8/pyqt5_sip-12.17.1-cp311-cp311-win32.whl", hash = "sha256:9dfa7fe4ac93b60004430699c4bf56fef842a356d64dfea7cbc6d580d0427d6d", size = 49099, upload-time = "2025-10-08T09:11:23.928Z" }, + { url = "https://files.pythonhosted.org/packages/33/f8/f47a849c17676557c4220fbce9fcc24e15736af247c4dddcaf9ff0124b57/pyqt5_sip-12.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:2ddd214cf40119b86942a5da2da5a7345334955ab00026d8dcc56326b30e6d3c", size = 58988, upload-time = "2025-10-08T09:08:34.903Z" }, +] + +[[package]] +name = "pyqtgraph" +version = "0.13.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/33/d9/b62d5cddb3caa6e5145664bee5ed90223dee23ca887ed3ee479f2609e40a/pyqtgraph-0.13.7.tar.gz", hash = "sha256:64f84f1935c6996d0e09b1ee66fe478a7771e3ca6f3aaa05f00f6e068321d9e3", size = 2343380, upload-time = "2024-04-29T02:18:58.467Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/34/5702b3b7cafe99be1d94b42f100e8cc5e6957b761fcb1cf5f72d492851da/pyqtgraph-0.13.7-py3-none-any.whl", hash = "sha256:7754edbefb6c367fa0dfb176e2d0610da3ada20aa7a5318516c74af5fb72bf7a", size = 1925473, upload-time = "2024-04-29T02:18:56.206Z" }, +] + +[[package]] +name = "pyquaternion" +version = "0.9.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/0d/3d092aa20efaedacb89c3221a92c6491be5b28f618a2c36b52b53e7446c2/pyquaternion-0.9.9.tar.gz", hash = "sha256:b1f61af219cb2fe966b5fb79a192124f2e63a3f7a777ac3cadf2957b1a81bea8", size = 15530, upload-time = "2020-10-05T01:31:30.327Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/49/b3/d8482e8cacc8ea15a356efea13d22ce1c5914a9ee36622ba250523240bf2/pyquaternion-0.9.9-py3-none-any.whl", hash = "sha256:e65f6e3f7b1fdf1a9e23f82434334a1ae84f14223eee835190cd2e841f8172ec", size = 14361, upload-time = "2020-10-05T01:31:37.575Z" }, +] + +[[package]] +name = "pyreadline3" +version = "3.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/49/4cea918a08f02817aabae639e3d0ac046fef9f9180518a3ad394e22da148/pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7", size = 99839, upload-time = "2024-09-19T02:40:10.062Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6", size = 83178, upload-time = "2024-09-19T02:40:08.598Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 
342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-json-logger" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/29/bf/eca6a3d43db1dae7070f70e160ab20b807627ba953663ba07928cdd3dc58/python_json_logger-4.0.0.tar.gz", hash = "sha256:f58e68eb46e1faed27e0f574a55a0455eecd7b8a5b88b85a784519ba3cff047f", size = 17683, upload-time = "2025-10-06T04:15:18.984Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/e5/fecf13f06e5e5f67e8837d777d1bc43fac0ed2b77a676804df5c34744727/python_json_logger-4.0.0-py3-none-any.whl", hash = "sha256:af09c9daf6a813aa4cc7180395f50f2a9e5fa056034c9953aec92e381c5ba1e2", size = 15548, upload-time = "2025-10-06T04:15:17.553Z" }, +] + +[[package]] +name = "pytz" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, +] + +[[package]] +name = "pyvista" +version = "0.46.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "matplotlib" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "pooch" }, + { name = "scooby" }, + { name = "typing-extensions" }, + { name = "vtk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/03/3f/701daab844e6f0570d02931b3e4b0b7e04b38f97b9c95d12403645445b23/pyvista-0.46.4.tar.gz", hash = "sha256:37ddd2783a45b552623df293e77ce93ed015244e8d89d1de179d2df594d7c571", size = 2398029, upload-time = "2025-10-30T15:24:13.144Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/e5/41a88ce247c606a12498a9297ef323b441b2be811599033a06ea62af0724/pyvista-0.46.4-py3-none-any.whl", hash = "sha256:f113a8db2f49ae1b2f9eeb477c51a4038a4dfdd02ac097900aa49bf1d77f7b2e", size = 2448669, upload-time = "2025-10-30T15:24:11.112Z" }, +] + +[package.optional-dependencies] +jupyter = [ + { name = "ipywidgets" }, + { name = "jupyter-server-proxy" }, + { name = "nest-asyncio" }, + { name = "trame" }, + { name = "trame-client" }, + { name = "trame-server" }, + { name = "trame-vtk" }, + { name = "trame-vuetify" }, +] + +[[package]] +name = "pyvistaqt" +version = "0.11.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyvista" }, + { name = "qtpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0f/31/5d48ac53030f2598a964f84ddeeb2ec249950354b3c2261d73a5369af06c/pyvistaqt-0.11.3.tar.gz", hash = "sha256:b45cceaee5013a9f98fec3c5de175f5af897f3a6c558bf65c600a0812c20beba", size = 569997, upload-time = "2025-07-24T18:01:22.494Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d9/e6/f7382f72a104360463032f03db743d256b624d5cbadd6a7c66b810958b84/pyvistaqt-0.11.3-py3-none-any.whl", hash = "sha256:2b6b1c7b5e03bca3aa60cba0eb44430b779d6b605f38c0e1899ebcd6d9304c05", size = 131886, upload-time = "2025-07-24T18:01:20.915Z" }, +] + +[[package]] +name = "pywinpty" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/bb/a7cc2967c5c4eceb6cc49cfe39447d4bfc56e6c865e7c2249b6eb978935f/pywinpty-3.0.2.tar.gz", hash = "sha256:1505cc4cb248af42cb6285a65c9c2086ee9e7e574078ee60933d5d7fa86fb004", size = 30669, upload-time = "2025-10-03T21:16:29.205Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a6/a1/409c1651c9f874d598c10f51ff586c416625601df4bca315d08baec4c3e3/pywinpty-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:327790d70e4c841ebd9d0f295a780177149aeb405bca44c7115a3de5c2054b23", size = 2050304, upload-time = "2025-10-03T21:19:29.466Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/16/a95b6757765b7b031c9374925bb718d55e0a9ba8a1b6a12d25962ea44347/pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e", size = 185826, upload-time = "2025-09-25T21:31:58.655Z" }, + { url = "https://files.pythonhosted.org/packages/16/19/13de8e4377ed53079ee996e1ab0a9c33ec2faf808a4647b7b4c0d46dd239/pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824", size = 175577, upload-time = "2025-09-25T21:32:00.088Z" }, + { url = "https://files.pythonhosted.org/packages/0c/62/d2eb46264d4b157dae1275b573017abec435397aa59cbcdab6fc978a8af4/pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c", size = 775556, upload-time = "2025-09-25T21:32:01.31Z" }, + { url = "https://files.pythonhosted.org/packages/10/cb/16c3f2cf3266edd25aaa00d6c4350381c8b012ed6f5276675b9eba8d9ff4/pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00", size = 882114, upload-time = "2025-09-25T21:32:03.376Z" }, + { url = "https://files.pythonhosted.org/packages/71/60/917329f640924b18ff085ab889a11c763e0b573da888e8404ff486657602/pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d", size = 806638, upload-time = "2025-09-25T21:32:04.553Z" }, + { url = "https://files.pythonhosted.org/packages/dd/6f/529b0f316a9fd167281a6c3826b5583e6192dba792dd55e3203d3f8e655a/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a", size = 767463, upload-time = "2025-09-25T21:32:06.152Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/6a/b627b4e0c1dd03718543519ffb2f1deea4a1e6d42fbab8021936a4d22589/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4", size = 794986, upload-time = "2025-09-25T21:32:07.367Z" }, + { url = "https://files.pythonhosted.org/packages/45/91/47a6e1c42d9ee337c4839208f30d9f09caa9f720ec7582917b264defc875/pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b", size = 142543, upload-time = "2025-09-25T21:32:08.95Z" }, + { url = "https://files.pythonhosted.org/packages/da/e3/ea007450a105ae919a72393cb06f122f288ef60bba2dc64b26e2646fa315/pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf", size = 158763, upload-time = "2025-09-25T21:32:09.96Z" }, +] + +[[package]] +name = "pyzmq" +version = "27.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "implementation_name == 'pypy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/0b/3c9baedbdf613ecaa7aa07027780b8867f57b6293b6ee50de316c9f3222b/pyzmq-27.1.0.tar.gz", hash = "sha256:ac0765e3d44455adb6ddbf4417dcce460fc40a05978c08efdf2948072f6db540", size = 281750, upload-time = "2025-09-08T23:10:18.157Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/06/5d/305323ba86b284e6fcb0d842d6adaa2999035f70f8c38a9b6d21ad28c3d4/pyzmq-27.1.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:226b091818d461a3bef763805e75685e478ac17e9008f49fce2d3e52b3d58b86", size = 1333328, upload-time = "2025-09-08T23:07:45.946Z" }, + { url = "https://files.pythonhosted.org/packages/bd/a0/fc7e78a23748ad5443ac3275943457e8452da67fda347e05260261108cbc/pyzmq-27.1.0-cp311-cp311-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:0790a0161c281ca9723f804871b4027f2e8b5a528d357c8952d08cd1a9c15581", size = 908803, upload-time = "2025-09-08T23:07:47.551Z" }, + { url = "https://files.pythonhosted.org/packages/7e/22/37d15eb05f3bdfa4abea6f6d96eb3bb58585fbd3e4e0ded4e743bc650c97/pyzmq-27.1.0-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c895a6f35476b0c3a54e3eb6ccf41bf3018de937016e6e18748317f25d4e925f", size = 668836, upload-time = "2025-09-08T23:07:49.436Z" }, + { url = "https://files.pythonhosted.org/packages/b1/c4/2a6fe5111a01005fc7af3878259ce17684fabb8852815eda6225620f3c59/pyzmq-27.1.0-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5bbf8d3630bf96550b3be8e1fc0fea5cbdc8d5466c1192887bd94869da17a63e", size = 857038, upload-time = "2025-09-08T23:07:51.234Z" }, + { url = "https://files.pythonhosted.org/packages/cb/eb/bfdcb41d0db9cd233d6fb22dc131583774135505ada800ebf14dfb0a7c40/pyzmq-27.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:15c8bd0fe0dabf808e2d7a681398c4e5ded70a551ab47482067a572c054c8e2e", size = 1657531, upload-time = "2025-09-08T23:07:52.795Z" }, + { url = "https://files.pythonhosted.org/packages/ab/21/e3180ca269ed4a0de5c34417dfe71a8ae80421198be83ee619a8a485b0c7/pyzmq-27.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bafcb3dd171b4ae9f19ee6380dfc71ce0390fefaf26b504c0e5f628d7c8c54f2", size = 2034786, upload-time = "2025-09-08T23:07:55.047Z" }, + { url = "https://files.pythonhosted.org/packages/3b/b1/5e21d0b517434b7f33588ff76c177c5a167858cc38ef740608898cd329f2/pyzmq-27.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:e829529fcaa09937189178115c49c504e69289abd39967cd8a4c215761373394", size = 1894220, upload-time = "2025-09-08T23:07:57.172Z" }, + { url = "https://files.pythonhosted.org/packages/03/f2/44913a6ff6941905efc24a1acf3d3cb6146b636c546c7406c38c49c403d4/pyzmq-27.1.0-cp311-cp311-win32.whl", hash = "sha256:6df079c47d5902af6db298ec92151db82ecb557af663098b92f2508c398bb54f", size = 567155, upload-time = "2025-09-08T23:07:59.05Z" }, + { url = "https://files.pythonhosted.org/packages/23/6d/d8d92a0eb270a925c9b4dd039c0b4dc10abc2fcbc48331788824ef113935/pyzmq-27.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:190cbf120fbc0fc4957b56866830def56628934a9d112aec0e2507aa6a032b97", size = 633428, upload-time = "2025-09-08T23:08:00.663Z" }, + { url = "https://files.pythonhosted.org/packages/ae/14/01afebc96c5abbbd713ecfc7469cfb1bc801c819a74ed5c9fad9a48801cb/pyzmq-27.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:eca6b47df11a132d1745eb3b5b5e557a7dae2c303277aa0e69c6ba91b8736e07", size = 559497, upload-time = "2025-09-08T23:08:02.15Z" }, + { url = "https://files.pythonhosted.org/packages/92/e7/038aab64a946d535901103da16b953c8c9cc9c961dadcbf3609ed6428d23/pyzmq-27.1.0-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:452631b640340c928fa343801b0d07eb0c3789a5ffa843f6e1a9cee0ba4eb4fc", size = 1306279, upload-time = "2025-09-08T23:08:03.807Z" }, + { url = "https://files.pythonhosted.org/packages/e8/5e/c3c49fdd0f535ef45eefcc16934648e9e59dace4a37ee88fc53f6cd8e641/pyzmq-27.1.0-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1c179799b118e554b66da67d88ed66cd37a169f1f23b5d9f0a231b4e8d44a113", size = 895645, upload-time = "2025-09-08T23:08:05.301Z" }, + { url = "https://files.pythonhosted.org/packages/f8/e5/b0b2504cb4e903a74dcf1ebae157f9e20ebb6ea76095f6cfffea28c42ecd/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3837439b7f99e60312f0c926a6ad437b067356dc2bc2ec96eb395fd0fe804233", size = 652574, upload-time = "2025-09-08T23:08:06.828Z" }, + { url = "https://files.pythonhosted.org/packages/f8/9b/c108cdb55560eaf253f0cbdb61b29971e9fb34d9c3499b0e96e4e60ed8a5/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43ad9a73e3da1fab5b0e7e13402f0b2fb934ae1c876c51d0afff0e7c052eca31", size = 840995, upload-time = "2025-09-08T23:08:08.396Z" }, + { url = "https://files.pythonhosted.org/packages/c2/bb/b79798ca177b9eb0825b4c9998c6af8cd2a7f15a6a1a4272c1d1a21d382f/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0de3028d69d4cdc475bfe47a6128eb38d8bc0e8f4d69646adfbcd840facbac28", size = 1642070, upload-time = "2025-09-08T23:08:09.989Z" }, + { url = "https://files.pythonhosted.org/packages/9c/80/2df2e7977c4ede24c79ae39dcef3899bfc5f34d1ca7a5b24f182c9b7a9ca/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:cf44a7763aea9298c0aa7dbf859f87ed7012de8bda0f3977b6fb1d96745df856", size = 2021121, upload-time = "2025-09-08T23:08:11.907Z" }, + { url = "https://files.pythonhosted.org/packages/46/bd/2d45ad24f5f5ae7e8d01525eb76786fa7557136555cac7d929880519e33a/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f30f395a9e6fbca195400ce833c731e7b64c3919aa481af4d88c3759e0cb7496", size = 1878550, upload-time = "2025-09-08T23:08:13.513Z" }, + { url = "https://files.pythonhosted.org/packages/e6/2f/104c0a3c778d7c2ab8190e9db4f62f0b6957b53c9d87db77c284b69f33ea/pyzmq-27.1.0-cp312-abi3-win32.whl", hash = "sha256:250e5436a4ba13885494412b3da5d518cd0d3a278a1ae640e113c073a5f88edd", size = 559184, upload-time = 
"2025-09-08T23:08:15.163Z" }, + { url = "https://files.pythonhosted.org/packages/fc/7f/a21b20d577e4100c6a41795842028235998a643b1ad406a6d4163ea8f53e/pyzmq-27.1.0-cp312-abi3-win_amd64.whl", hash = "sha256:9ce490cf1d2ca2ad84733aa1d69ce6855372cb5ce9223802450c9b2a7cba0ccf", size = 619480, upload-time = "2025-09-08T23:08:17.192Z" }, + { url = "https://files.pythonhosted.org/packages/78/c2/c012beae5f76b72f007a9e91ee9401cb88c51d0f83c6257a03e785c81cc2/pyzmq-27.1.0-cp312-abi3-win_arm64.whl", hash = "sha256:75a2f36223f0d535a0c919e23615fc85a1e23b71f40c7eb43d7b1dedb4d8f15f", size = 552993, upload-time = "2025-09-08T23:08:18.926Z" }, + { url = "https://files.pythonhosted.org/packages/4c/c6/c4dcdecdbaa70969ee1fdced6d7b8f60cfabe64d25361f27ac4665a70620/pyzmq-27.1.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:18770c8d3563715387139060d37859c02ce40718d1faf299abddcdcc6a649066", size = 836265, upload-time = "2025-09-08T23:09:49.376Z" }, + { url = "https://files.pythonhosted.org/packages/3e/79/f38c92eeaeb03a2ccc2ba9866f0439593bb08c5e3b714ac1d553e5c96e25/pyzmq-27.1.0-pp311-pypy311_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:ac25465d42f92e990f8d8b0546b01c391ad431c3bf447683fdc40565941d0604", size = 800208, upload-time = "2025-09-08T23:09:51.073Z" }, + { url = "https://files.pythonhosted.org/packages/49/0e/3f0d0d335c6b3abb9b7b723776d0b21fa7f3a6c819a0db6097059aada160/pyzmq-27.1.0-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53b40f8ae006f2734ee7608d59ed661419f087521edbfc2149c3932e9c14808c", size = 567747, upload-time = "2025-09-08T23:09:52.698Z" }, + { url = "https://files.pythonhosted.org/packages/a1/cf/f2b3784d536250ffd4be70e049f3b60981235d70c6e8ce7e3ef21e1adb25/pyzmq-27.1.0-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f605d884e7c8be8fe1aa94e0a783bf3f591b84c24e4bc4f3e7564c82ac25e271", size = 747371, upload-time = "2025-09-08T23:09:54.563Z" }, + { url = "https://files.pythonhosted.org/packages/01/1b/5dbe84eefc86f48473947e2f41711aded97eecef1231f4558f1f02713c12/pyzmq-27.1.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c9f7f6e13dff2e44a6afeaf2cf54cee5929ad64afaf4d40b50f93c58fc687355", size = 544862, upload-time = "2025-09-08T23:09:56.509Z" }, +] + +[[package]] +name = "qtawesome" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "qtpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/56/57/2f6c402b4cd91a58fd378a5f36be6b5855cf43dc25f77f581e2612e6d558/qtawesome-1.4.0.tar.gz", hash = "sha256:783e414d1317f3e978bf67ea8e8a1b1498bad9dbd305dec814027e3b50521be6", size = 2614365, upload-time = "2025-02-27T22:01:01.864Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/ee/6e6c6715129c929af2d95ddb2e9decf54c1beffe58f336911197aacc0448/qtawesome-1.4.0-py3-none-any.whl", hash = "sha256:a4d689fa071c595aa6184171ce1f0f847677cb8d2db45382c43129f1d72a3d93", size = 2595296, upload-time = "2025-02-27T22:00:59.921Z" }, +] + +[[package]] +name = "qtpy" +version = "2.4.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/01/392eba83c8e47b946b929d7c46e0f04b35e9671f8bb6fc36b6f7945b4de8/qtpy-2.4.3.tar.gz", hash = "sha256:db744f7832e6d3da90568ba6ccbca3ee2b3b4a890c3d6fbbc63142f6e4cdf5bb", size = 66982, upload-time = "2025-02-11T15:09:25.759Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/69/76/37c0ccd5ab968a6a438f9c623aeecc84c202ab2fabc6a8fd927580c15b5a/QtPy-2.4.3-py3-none-any.whl", hash = "sha256:72095afe13673e017946cc258b8d5da43314197b741ed2890e563cf384b51aa1", size = 95045, upload-time = "2025-02-11T15:09:24.162Z" }, +] + +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "retrying" +version = "1.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c8/5a/b17e1e257d3e6f2e7758930e1256832c9ddd576f8631781e6a072914befa/retrying-1.4.2.tar.gz", hash = "sha256:d102e75d53d8d30b88562d45361d6c6c934da06fab31bd81c0420acb97a8ba39", size = 11411, upload-time = "2025-08-03T03:35:25.189Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/67/f3/6cd296376653270ac1b423bb30bd70942d9916b6978c6f40472d6ac038e7/retrying-1.4.2-py3-none-any.whl", hash = "sha256:bbc004aeb542a74f3569aeddf42a2516efefcdaff90df0eb38fbfbf19f179f59", size = 10859, upload-time = "2025-08-03T03:35:23.829Z" }, +] + +[[package]] +name = "rfc3339-validator" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/28/ea/a9387748e2d111c3c2b275ba970b735e04e15cdb1eb30693b6b5708c4dbd/rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b", size = 5513, upload-time = "2021-05-12T16:37:54.178Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/44/4e421b96b67b2daff264473f7465db72fbdf36a07e05494f50300cc7b0c6/rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa", size = 3490, upload-time = "2021-05-12T16:37:52.536Z" }, +] + +[[package]] +name = "rfc3986-validator" +version = "0.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/da/88/f270de456dd7d11dcc808abfa291ecdd3f45ff44e3b549ffa01b126464d0/rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055", size = 6760, upload-time = "2019-10-28T16:00:19.144Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/51/17023c0f8f1869d8806b979a2bffa3f861f26a3f1a66b094288323fba52f/rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9", size = 4242, upload-time = "2019-10-28T16:00:13.976Z" }, +] + +[[package]] +name = "rfc3987-syntax" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "lark" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2c/06/37c1a5557acf449e8e406a830a05bf885ac47d33270aec454ef78675008d/rfc3987_syntax-1.1.0.tar.gz", hash = "sha256:717a62cbf33cffdd16dfa3a497d81ce48a660ea691b1ddd7be710c22f00b4a0d", size = 14239, upload-time = "2025-07-18T01:05:05.015Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/71/44ce230e1b7fadd372515a97e32a83011f906ddded8d03e3c6aafbdedbb7/rfc3987_syntax-1.1.0-py3-none-any.whl", hash = "sha256:6c3d97604e4c5ce9f714898e05401a0445a641cfa276432b0a648c80856f6a3f", size = 8046, upload-time = "2025-07-18T01:05:03.843Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.28.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/48/dc/95f074d43452b3ef5d06276696ece4b3b5d696e7c9ad7173c54b1390cd70/rpds_py-0.28.0.tar.gz", hash = "sha256:abd4df20485a0983e2ca334a216249b6186d6e3c1627e106651943dbdb791aea", size = 27419, upload-time = "2025-10-22T22:24:29.327Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a6/34/058d0db5471c6be7bef82487ad5021ff8d1d1d27794be8730aad938649cf/rpds_py-0.28.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:03065002fd2e287725d95fbc69688e0c6daf6c6314ba38bdbaa3895418e09296", size = 362344, upload-time = "2025-10-22T22:21:39.713Z" }, + { url = "https://files.pythonhosted.org/packages/5d/67/9503f0ec8c055a0782880f300c50a2b8e5e72eb1f94dfc2053da527444dd/rpds_py-0.28.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28ea02215f262b6d078daec0b45344c89e161eab9526b0d898221d96fdda5f27", size = 348440, upload-time = "2025-10-22T22:21:41.056Z" }, + { url = "https://files.pythonhosted.org/packages/68/2e/94223ee9b32332a41d75b6f94b37b4ce3e93878a556fc5f152cbd856a81f/rpds_py-0.28.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25dbade8fbf30bcc551cb352376c0ad64b067e4fc56f90e22ba70c3ce205988c", size = 379068, upload-time = "2025-10-22T22:21:42.593Z" }, + { url = "https://files.pythonhosted.org/packages/b4/25/54fd48f9f680cfc44e6a7f39a5fadf1d4a4a1fd0848076af4a43e79f998c/rpds_py-0.28.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c03002f54cc855860bfdc3442928ffdca9081e73b5b382ed0b9e8efe6e5e205", size = 390518, upload-time = "2025-10-22T22:21:43.998Z" }, + { url = "https://files.pythonhosted.org/packages/1b/85/ac258c9c27f2ccb1bd5d0697e53a82ebcf8088e3186d5d2bf8498ee7ed44/rpds_py-0.28.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9699fa7990368b22032baf2b2dce1f634388e4ffc03dfefaaac79f4695edc95", size = 525319, upload-time = "2025-10-22T22:21:45.645Z" }, + { url = 
"https://files.pythonhosted.org/packages/40/cb/c6734774789566d46775f193964b76627cd5f42ecf246d257ce84d1912ed/rpds_py-0.28.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9b06fe1a75e05e0713f06ea0c89ecb6452210fd60e2f1b6ddc1067b990e08d9", size = 404896, upload-time = "2025-10-22T22:21:47.544Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/14e37ce83202c632c89b0691185dca9532288ff9d390eacae3d2ff771bae/rpds_py-0.28.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9f83e7b326a3f9ec3ef84cda98fb0a74c7159f33e692032233046e7fd15da2", size = 382862, upload-time = "2025-10-22T22:21:49.176Z" }, + { url = "https://files.pythonhosted.org/packages/6a/83/f3642483ca971a54d60caa4449f9d6d4dbb56a53e0072d0deff51b38af74/rpds_py-0.28.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:0d3259ea9ad8743a75a43eb7819324cdab393263c91be86e2d1901ee65c314e0", size = 398848, upload-time = "2025-10-22T22:21:51.024Z" }, + { url = "https://files.pythonhosted.org/packages/44/09/2d9c8b2f88e399b4cfe86efdf2935feaf0394e4f14ab30c6c5945d60af7d/rpds_py-0.28.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a7548b345f66f6695943b4ef6afe33ccd3f1b638bd9afd0f730dd255c249c9e", size = 412030, upload-time = "2025-10-22T22:21:52.665Z" }, + { url = "https://files.pythonhosted.org/packages/dd/f5/e1cec473d4bde6df1fd3738be8e82d64dd0600868e76e92dfeaebbc2d18f/rpds_py-0.28.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9a40040aa388b037eb39416710fbcce9443498d2eaab0b9b45ae988b53f5c67", size = 559700, upload-time = "2025-10-22T22:21:54.123Z" }, + { url = "https://files.pythonhosted.org/packages/8d/be/73bb241c1649edbf14e98e9e78899c2c5e52bbe47cb64811f44d2cc11808/rpds_py-0.28.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8f60c7ea34e78c199acd0d3cda37a99be2c861dd2b8cf67399784f70c9f8e57d", size = 584581, upload-time = "2025-10-22T22:21:56.102Z" }, + { url = "https://files.pythonhosted.org/packages/9c/9c/ffc6e9218cd1eb5c2c7dbd276c87cd10e8c2232c456b554169eb363381df/rpds_py-0.28.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1571ae4292649100d743b26d5f9c63503bb1fedf538a8f29a98dce2d5ba6b4e6", size = 549981, upload-time = "2025-10-22T22:21:58.253Z" }, + { url = "https://files.pythonhosted.org/packages/5f/50/da8b6d33803a94df0149345ee33e5d91ed4d25fc6517de6a25587eae4133/rpds_py-0.28.0-cp311-cp311-win32.whl", hash = "sha256:5cfa9af45e7c1140af7321fa0bef25b386ee9faa8928c80dc3a5360971a29e8c", size = 214729, upload-time = "2025-10-22T22:21:59.625Z" }, + { url = "https://files.pythonhosted.org/packages/12/fd/b0f48c4c320ee24c8c20df8b44acffb7353991ddf688af01eef5f93d7018/rpds_py-0.28.0-cp311-cp311-win_amd64.whl", hash = "sha256:dd8d86b5d29d1b74100982424ba53e56033dc47720a6de9ba0259cf81d7cecaa", size = 223977, upload-time = "2025-10-22T22:22:01.092Z" }, + { url = "https://files.pythonhosted.org/packages/b4/21/c8e77a2ac66e2ec4e21f18a04b4e9a0417ecf8e61b5eaeaa9360a91713b4/rpds_py-0.28.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e27d3a5709cc2b3e013bf93679a849213c79ae0573f9b894b284b55e729e120", size = 217326, upload-time = "2025-10-22T22:22:02.944Z" }, + { url = "https://files.pythonhosted.org/packages/ae/bc/b43f2ea505f28119bd551ae75f70be0c803d2dbcd37c1b3734909e40620b/rpds_py-0.28.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f5e7101145427087e493b9c9b959da68d357c28c562792300dd21a095118ed16", size = 363913, upload-time = "2025-10-22T22:24:07.129Z" }, + { url = 
"https://files.pythonhosted.org/packages/28/f2/db318195d324c89a2c57dc5195058cbadd71b20d220685c5bd1da79ee7fe/rpds_py-0.28.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:31eb671150b9c62409a888850aaa8e6533635704fe2b78335f9aaf7ff81eec4d", size = 350452, upload-time = "2025-10-22T22:24:08.754Z" }, + { url = "https://files.pythonhosted.org/packages/ae/f2/1391c819b8573a4898cedd6b6c5ec5bc370ce59e5d6bdcebe3c9c1db4588/rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48b55c1f64482f7d8bd39942f376bfdf2f6aec637ee8c805b5041e14eeb771db", size = 380957, upload-time = "2025-10-22T22:24:10.826Z" }, + { url = "https://files.pythonhosted.org/packages/5a/5c/e5de68ee7eb7248fce93269833d1b329a196d736aefb1a7481d1e99d1222/rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:24743a7b372e9a76171f6b69c01aedf927e8ac3e16c474d9fe20d552a8cb45c7", size = 391919, upload-time = "2025-10-22T22:24:12.559Z" }, + { url = "https://files.pythonhosted.org/packages/fb/4f/2376336112cbfeb122fd435d608ad8d5041b3aed176f85a3cb32c262eb80/rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:389c29045ee8bbb1627ea190b4976a310a295559eaf9f1464a1a6f2bf84dde78", size = 528541, upload-time = "2025-10-22T22:24:14.197Z" }, + { url = "https://files.pythonhosted.org/packages/68/53/5ae232e795853dd20da7225c5dd13a09c0a905b1a655e92bdf8d78a99fd9/rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23690b5827e643150cf7b49569679ec13fe9a610a15949ed48b85eb7f98f34ec", size = 405629, upload-time = "2025-10-22T22:24:16.001Z" }, + { url = "https://files.pythonhosted.org/packages/b9/2d/351a3b852b683ca9b6b8b38ed9efb2347596973849ba6c3a0e99877c10aa/rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f0c9266c26580e7243ad0d72fc3e01d6b33866cfab5084a6da7576bcf1c4f72", size = 384123, upload-time = "2025-10-22T22:24:17.585Z" }, + { url = "https://files.pythonhosted.org/packages/e0/15/870804daa00202728cc91cb8e2385fa9f1f4eb49857c49cfce89e304eae6/rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:4c6c4db5d73d179746951486df97fd25e92396be07fc29ee8ff9a8f5afbdfb27", size = 400923, upload-time = "2025-10-22T22:24:19.512Z" }, + { url = "https://files.pythonhosted.org/packages/53/25/3706b83c125fa2a0bccceac951de3f76631f6bd0ee4d02a0ed780712ef1b/rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3b695a8fa799dd2cfdb4804b37096c5f6dba1ac7f48a7fbf6d0485bcd060316", size = 413767, upload-time = "2025-10-22T22:24:21.316Z" }, + { url = "https://files.pythonhosted.org/packages/ef/f9/ce43dbe62767432273ed2584cef71fef8411bddfb64125d4c19128015018/rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:6aa1bfce3f83baf00d9c5fcdbba93a3ab79958b4c7d7d1f55e7fe68c20e63912", size = 561530, upload-time = "2025-10-22T22:24:22.958Z" }, + { url = "https://files.pythonhosted.org/packages/46/c9/ffe77999ed8f81e30713dd38fd9ecaa161f28ec48bb80fa1cd9118399c27/rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:7b0f9dceb221792b3ee6acb5438eb1f02b0cb2c247796a72b016dcc92c6de829", size = 585453, upload-time = "2025-10-22T22:24:24.779Z" }, + { url = "https://files.pythonhosted.org/packages/ed/d2/4a73b18821fd4669762c855fd1f4e80ceb66fb72d71162d14da58444a763/rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:5d0145edba8abd3db0ab22b5300c99dc152f5c9021fab861be0f0544dc3cbc5f", size = 552199, upload-time = "2025-10-22T22:24:26.54Z" }, +] + +[[package]] +name = "scikit-image" +version = "0.25.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "imageio" }, + { name = "lazy-loader" }, + { name = "networkx" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "scipy" }, + { name = "tifffile" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c7/a8/3c0f256012b93dd2cb6fda9245e9f4bff7dc0486880b248005f15ea2255e/scikit_image-0.25.2.tar.gz", hash = "sha256:e5a37e6cd4d0c018a7a55b9d601357e3382826d3888c10d0213fc63bff977dde", size = 22693594, upload-time = "2025-02-18T18:05:24.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/97/3051c68b782ee3f1fb7f8f5bb7d535cf8cb92e8aae18fa9c1cdf7e15150d/scikit_image-0.25.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f4bac9196fb80d37567316581c6060763b0f4893d3aca34a9ede3825bc035b17", size = 14003057, upload-time = "2025-02-18T18:04:30.395Z" }, + { url = "https://files.pythonhosted.org/packages/19/23/257fc696c562639826065514d551b7b9b969520bd902c3a8e2fcff5b9e17/scikit_image-0.25.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d989d64ff92e0c6c0f2018c7495a5b20e2451839299a018e0e5108b2680f71e0", size = 13180335, upload-time = "2025-02-18T18:04:33.449Z" }, + { url = "https://files.pythonhosted.org/packages/ef/14/0c4a02cb27ca8b1e836886b9ec7c9149de03053650e9e2ed0625f248dd92/scikit_image-0.25.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2cfc96b27afe9a05bc92f8c6235321d3a66499995675b27415e0d0c76625173", size = 14144783, upload-time = "2025-02-18T18:04:36.594Z" }, + { url = "https://files.pythonhosted.org/packages/dd/9b/9fb556463a34d9842491d72a421942c8baff4281025859c84fcdb5e7e602/scikit_image-0.25.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24cc986e1f4187a12aa319f777b36008764e856e5013666a4a83f8df083c2641", size = 14785376, upload-time = "2025-02-18T18:04:39.856Z" }, + { url = "https://files.pythonhosted.org/packages/de/ec/b57c500ee85885df5f2188f8bb70398481393a69de44a00d6f1d055f103c/scikit_image-0.25.2-cp311-cp311-win_amd64.whl", hash = "sha256:b4f6b61fc2db6340696afe3db6b26e0356911529f5f6aee8c322aa5157490c9b", size = 12791698, upload-time = "2025-02-18T18:04:42.868Z" }, +] + +[[package]] +name = "scikit-learn" +version = "1.7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "joblib" }, + { name = "numpy" }, + { name = "scipy" }, + { name = "threadpoolctl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/c2/a7855e41c9d285dfe86dc50b250978105dce513d6e459ea66a6aeb0e1e0c/scikit_learn-1.7.2.tar.gz", hash = "sha256:20e9e49ecd130598f1ca38a1d85090e1a600147b9c02fa6f15d69cb53d968fda", size = 7193136, upload-time = "2025-09-09T08:21:29.075Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/83/564e141eef908a5863a54da8ca342a137f45a0bfb71d1d79704c9894c9d1/scikit_learn-1.7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7509693451651cd7361d30ce4e86a1347493554f172b1c72a39300fa2aea79e", size = 9331967, upload-time = "2025-09-09T08:20:32.421Z" }, + { url = "https://files.pythonhosted.org/packages/18/d6/ba863a4171ac9d7314c4d3fc251f015704a2caeee41ced89f321c049ed83/scikit_learn-1.7.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:0486c8f827c2e7b64837c731c8feff72c0bd2b998067a8a9cbc10643c31f0fe1", size = 8648645, upload-time = 
"2025-09-09T08:20:34.436Z" }, + { url = "https://files.pythonhosted.org/packages/ef/0e/97dbca66347b8cf0ea8b529e6bb9367e337ba2e8be0ef5c1a545232abfde/scikit_learn-1.7.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89877e19a80c7b11a2891a27c21c4894fb18e2c2e077815bcade10d34287b20d", size = 9715424, upload-time = "2025-09-09T08:20:36.776Z" }, + { url = "https://files.pythonhosted.org/packages/f7/32/1f3b22e3207e1d2c883a7e09abb956362e7d1bd2f14458c7de258a26ac15/scikit_learn-1.7.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8da8bf89d4d79aaec192d2bda62f9b56ae4e5b4ef93b6a56b5de4977e375c1f1", size = 9509234, upload-time = "2025-09-09T08:20:38.957Z" }, + { url = "https://files.pythonhosted.org/packages/9f/71/34ddbd21f1da67c7a768146968b4d0220ee6831e4bcbad3e03dd3eae88b6/scikit_learn-1.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:9b7ed8d58725030568523e937c43e56bc01cadb478fc43c042a9aca1dacb3ba1", size = 8894244, upload-time = "2025-09-09T08:20:41.166Z" }, +] + +[[package]] +name = "scipy" +version = "1.16.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0a/ca/d8ace4f98322d01abcd52d381134344bf7b431eba7ed8b42bdea5a3c2ac9/scipy-1.16.3.tar.gz", hash = "sha256:01e87659402762f43bd2fee13370553a17ada367d42e7487800bf2916535aecb", size = 30597883, upload-time = "2025-10-28T17:38:54.068Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/5f/6f37d7439de1455ce9c5a556b8d1db0979f03a796c030bafdf08d35b7bf9/scipy-1.16.3-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:40be6cf99e68b6c4321e9f8782e7d5ff8265af28ef2cd56e9c9b2638fa08ad97", size = 36630881, upload-time = "2025-10-28T17:31:47.104Z" }, + { url = "https://files.pythonhosted.org/packages/7c/89/d70e9f628749b7e4db2aa4cd89735502ff3f08f7b9b27d2e799485987cd9/scipy-1.16.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:8be1ca9170fcb6223cc7c27f4305d680ded114a1567c0bd2bfcbf947d1b17511", size = 28941012, upload-time = "2025-10-28T17:31:53.411Z" }, + { url = "https://files.pythonhosted.org/packages/a8/a8/0e7a9a6872a923505dbdf6bb93451edcac120363131c19013044a1e7cb0c/scipy-1.16.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:bea0a62734d20d67608660f69dcda23e7f90fb4ca20974ab80b6ed40df87a005", size = 20931935, upload-time = "2025-10-28T17:31:57.361Z" }, + { url = "https://files.pythonhosted.org/packages/bd/c7/020fb72bd79ad798e4dbe53938543ecb96b3a9ac3fe274b7189e23e27353/scipy-1.16.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:2a207a6ce9c24f1951241f4693ede2d393f59c07abc159b2cb2be980820e01fb", size = 23534466, upload-time = "2025-10-28T17:32:01.875Z" }, + { url = "https://files.pythonhosted.org/packages/be/a0/668c4609ce6dbf2f948e167836ccaf897f95fb63fa231c87da7558a374cd/scipy-1.16.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:532fb5ad6a87e9e9cd9c959b106b73145a03f04c7d57ea3e6f6bb60b86ab0876", size = 33593618, upload-time = "2025-10-28T17:32:06.902Z" }, + { url = "https://files.pythonhosted.org/packages/ca/6e/8942461cf2636cdae083e3eb72622a7fbbfa5cf559c7d13ab250a5dbdc01/scipy-1.16.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0151a0749efeaaab78711c78422d413c583b8cdd2011a3c1d6c794938ee9fdb2", size = 35899798, upload-time = "2025-10-28T17:32:12.665Z" }, + { url = "https://files.pythonhosted.org/packages/79/e8/d0f33590364cdbd67f28ce79368b373889faa4ee959588beddf6daef9abe/scipy-1.16.3-cp311-cp311-musllinux_1_2_aarch64.whl", 
hash = "sha256:b7180967113560cca57418a7bc719e30366b47959dd845a93206fbed693c867e", size = 36226154, upload-time = "2025-10-28T17:32:17.961Z" }, + { url = "https://files.pythonhosted.org/packages/39/c1/1903de608c0c924a1749c590064e65810f8046e437aba6be365abc4f7557/scipy-1.16.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:deb3841c925eeddb6afc1e4e4a45e418d19ec7b87c5df177695224078e8ec733", size = 38878540, upload-time = "2025-10-28T17:32:23.907Z" }, + { url = "https://files.pythonhosted.org/packages/f1/d0/22ec7036ba0b0a35bccb7f25ab407382ed34af0b111475eb301c16f8a2e5/scipy-1.16.3-cp311-cp311-win_amd64.whl", hash = "sha256:53c3844d527213631e886621df5695d35e4f6a75f620dca412bcd292f6b87d78", size = 38722107, upload-time = "2025-10-28T17:32:29.921Z" }, + { url = "https://files.pythonhosted.org/packages/7b/60/8a00e5a524bb3bf8898db1650d350f50e6cffb9d7a491c561dc9826c7515/scipy-1.16.3-cp311-cp311-win_arm64.whl", hash = "sha256:9452781bd879b14b6f055b26643703551320aa8d79ae064a71df55c00286a184", size = 25506272, upload-time = "2025-10-28T17:32:34.577Z" }, +] + +[[package]] +name = "scooby" +version = "0.11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/d1/a28f3be1503a9c474a4878424bbeb93a55a2ec7d0cb66559aa258e690aea/scooby-0.11.0.tar.gz", hash = "sha256:3dfacc6becf2d6558efa4b625bae3b844ced5d256f3143ebf774e005367e712a", size = 22102, upload-time = "2025-11-01T19:22:53.894Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/bb/bbae36d06c0fd670e8373da67096cd57058b57c9bad7d92969b5e3b730af/scooby-0.11.0-py3-none-any.whl", hash = "sha256:a79663d1a7711eb104e4b2935988ea1ed5f7be6b7288fad23b4fba7462832f9d", size = 19877, upload-time = "2025-11-01T19:22:53.046Z" }, +] + +[[package]] +name = "segment-anything" +version = "1.0" +source = { git = "https://github.com/facebookresearch/segment-anything.git#dca509fe793f601edb92606367a655c15ac00fdf" } + +[[package]] +name = "send2trash" +version = "1.8.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fd/3a/aec9b02217bb79b87bbc1a21bc6abc51e3d5dcf65c30487ac96c0908c722/Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf", size = 17394, upload-time = "2024-04-07T00:01:09.267Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/b0/4562db6223154aa4e22f939003cb92514c79f3d4dccca3444253fd17f902/Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9", size = 18072, upload-time = "2024-04-07T00:01:07.438Z" }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "simpervisor" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/dd/fc/9182a4049036c5de29f84a16c5a33304ffc4dbb06d76d569ded8ad527574/simpervisor-1.0.0.tar.gz", hash = "sha256:7eb87ca86d5e276976f5bb0290975a05d452c6a7b7f58062daea7d8369c823c1", size = 14637, upload-time = "2023-05-18T14:01:27.069Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/65/be223a02df814a3dbd84d8a0c446d21d4860a4f23ec4d81aabea34e7e994/simpervisor-1.0.0-py3-none-any.whl", hash = "sha256:3e313318264559beea3f475ead202bc1cd58a2f1288363abb5657d306c5b8388", size = 8342, upload-time = "2023-05-18T14:01:25.92Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "soupsieve" +version = "2.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/e6/21ccce3262dd4889aa3332e5a119a3491a95e8f60939870a3a035aabac0d/soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f", size = 103472, upload-time = "2025-08-27T15:39:51.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" }, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.44" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "greenlet", marker = "platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f0/f2/840d7b9496825333f532d2e3976b8eadbf52034178aac53630d09fe6e1ef/sqlalchemy-2.0.44.tar.gz", hash = "sha256:0ae7454e1ab1d780aee69fd2aae7d6b8670a581d8847f2d1e0f7ddfbf47e5a22", size = 9819830, upload-time = "2025-10-10T14:39:12.935Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e3/81/15d7c161c9ddf0900b076b55345872ed04ff1ed6a0666e5e94ab44b0163c/sqlalchemy-2.0.44-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fe3917059c7ab2ee3f35e77757062b1bea10a0b6ca633c58391e3f3c6c488dd", size = 2140517, upload-time = "2025-10-10T15:36:15.64Z" }, + { url = "https://files.pythonhosted.org/packages/d4/d5/4abd13b245c7d91bdf131d4916fd9e96a584dac74215f8b5bc945206a974/sqlalchemy-2.0.44-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:de4387a354ff230bc979b46b2207af841dc8bf29847b6c7dbe60af186d97aefa", size = 2130738, upload-time = "2025-10-10T15:36:16.91Z" }, + { url = "https://files.pythonhosted.org/packages/cb/3c/8418969879c26522019c1025171cefbb2a8586b6789ea13254ac602986c0/sqlalchemy-2.0.44-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3678a0fb72c8a6a29422b2732fe423db3ce119c34421b5f9955873eb9b62c1e", size = 3304145, upload-time = "2025-10-10T15:34:19.569Z" }, + { url = "https://files.pythonhosted.org/packages/94/2d/fdb9246d9d32518bda5d90f4b65030b9bf403a935cfe4c36a474846517cb/sqlalchemy-2.0.44-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cf6872a23601672d61a68f390e44703442639a12ee9dd5a88bbce52a695e46e", size = 3304511, upload-time = "2025-10-10T15:47:05.088Z" }, + { url = "https://files.pythonhosted.org/packages/7d/fb/40f2ad1da97d5c83f6c1269664678293d3fe28e90ad17a1093b735420549/sqlalchemy-2.0.44-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:329aa42d1be9929603f406186630135be1e7a42569540577ba2c69952b7cf399", size = 3235161, upload-time = "2025-10-10T15:34:21.193Z" }, + { url = "https://files.pythonhosted.org/packages/95/cb/7cf4078b46752dca917d18cf31910d4eff6076e5b513c2d66100c4293d83/sqlalchemy-2.0.44-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:70e03833faca7166e6a9927fbee7c27e6ecde436774cd0b24bbcc96353bce06b", size = 3261426, upload-time = "2025-10-10T15:47:07.196Z" }, + { url = "https://files.pythonhosted.org/packages/f8/3b/55c09b285cb2d55bdfa711e778bdffdd0dc3ffa052b0af41f1c5d6e582fa/sqlalchemy-2.0.44-cp311-cp311-win32.whl", hash = "sha256:253e2f29843fb303eca6b2fc645aca91fa7aa0aa70b38b6950da92d44ff267f3", size = 2105392, upload-time = "2025-10-10T15:38:20.051Z" }, + { url = "https://files.pythonhosted.org/packages/c7/23/907193c2f4d680aedbfbdf7bf24c13925e3c7c292e813326c1b84a0b878e/sqlalchemy-2.0.44-cp311-cp311-win_amd64.whl", hash = "sha256:7a8694107eb4308a13b425ca8c0e67112f8134c846b6e1f722698708741215d5", size = 2130293, upload-time = "2025-10-10T15:38:21.601Z" }, + { url = "https://files.pythonhosted.org/packages/9c/5e/6a29fa884d9fb7ddadf6b69490a9d45fded3b38541713010dad16b77d015/sqlalchemy-2.0.44-py3-none-any.whl", hash = "sha256:19de7ca1246fbef9f9d1bff8f1ab25641569df226364a0e40457dc5457c54b05", size = 1928718, upload-time = "2025-10-10T15:29:45.32Z" }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707, upload-time = "2023-09-30T13:58:05.479Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = 
"sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" }, +] + +[[package]] +name = "superqt" +version = "0.7.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pygments" }, + { name = "qtpy" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/71/6892281dd27e2945ec314a501d0775b10698624116ec4053a732456910e8/superqt-0.7.6.tar.gz", hash = "sha256:822fdba71dc391929c9d3db839f78ca2a861e2f2876926f969a288dfb2a9787e", size = 107043, upload-time = "2025-08-12T17:03:04.383Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/60/b86845ddd057a307b92b7f5866f9e5e12bb5a471e087d71b481d68792c7f/superqt-0.7.6-py3-none-any.whl", hash = "sha256:6961833acd67df62b12918ded9945c5e7b17d75452c0159a121d799d65e2f0c2", size = 100560, upload-time = "2025-08-12T17:03:03.085Z" }, +] + +[[package]] +name = "sympy" +version = "1.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, +] + +[[package]] +name = "terminado" +version = "0.18.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess", marker = "os_name != 'nt'" }, + { name = "pywinpty", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "tornado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/11/965c6fd8e5cc254f1fe142d547387da17a8ebfd75a3455f637c663fb38a0/terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e", size = 32701, upload-time = "2024-03-12T14:34:39.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/9e/2064975477fdc887e47ad42157e214526dcad8f317a948dee17e1659a62f/terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0", size = 14154, upload-time = "2024-03-12T14:34:36.569Z" }, +] + +[[package]] +name = "threadpoolctl" +version = "3.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b7/4d/08c89e34946fce2aec4fbb45c9016efd5f4d7f24af8e5d93296e935631d8/threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e", size = 21274, upload-time = "2025-03-13T13:49:23.031Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb", size = 18638, upload-time = "2025-03-13T13:49:21.846Z" }, +] + +[[package]] +name = "tifffile" +version = "2025.10.16" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { 
url = "https://files.pythonhosted.org/packages/2d/b5/0d8f3d395f07d25ec4cafcdfc8cab234b2cc6bf2465e9d7660633983fe8f/tifffile-2025.10.16.tar.gz", hash = "sha256:425179ec7837ac0e07bc95d2ea5bea9b179ce854967c12ba07fc3f093e58efc1", size = 371848, upload-time = "2025-10-16T22:56:09.043Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/5e/56c751afab61336cf0e7aa671b134255a30f15f59cd9e04f59c598a37ff5/tifffile-2025.10.16-py3-none-any.whl", hash = "sha256:41463d979c1c262b0a5cdef2a7f95f0388a072ad82d899458b154a48609d759c", size = 231162, upload-time = "2025-10-16T22:56:07.214Z" }, +] + +[[package]] +name = "tinycss2" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "webencodings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7a/fd/7a5ee21fd08ff70d3d33a5781c255cbe779659bd03278feb98b19ee550f4/tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7", size = 87085, upload-time = "2024-10-24T14:58:29.895Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610, upload-time = "2024-10-24T14:58:28.029Z" }, +] + +[[package]] +name = "toml" +version = "0.10.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/be/ba/1f744cdc819428fc6b5084ec34d9b30660f6f9daaf70eead706e3203ec3c/toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f", size = 22253, upload-time = "2020-11-01T01:40:22.204Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", size = 16588, upload-time = "2020-11-01T01:40:20.672Z" }, +] + +[[package]] +name = "torch" +version = "2.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "jinja2" }, + { name = "networkx" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufile-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = 
"nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvshmem-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "sympy" }, + { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "typing-extensions" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/db/c064112ac0089af3d2f7a2b5bfbabf4aa407a78b74f87889e524b91c5402/torch-2.9.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:62b3fd888277946918cba4478cf849303da5359f0fb4e3bfb86b0533ba2eaf8d", size = 104220430, upload-time = "2025-11-12T15:20:31.705Z" }, + { url = "https://files.pythonhosted.org/packages/56/be/76eaa36c9cd032d3b01b001e2c5a05943df75f26211f68fae79e62f87734/torch-2.9.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d033ff0ac3f5400df862a51bdde9bad83561f3739ea0046e68f5401ebfa67c1b", size = 899821446, upload-time = "2025-11-12T15:20:15.544Z" }, + { url = "https://files.pythonhosted.org/packages/47/cc/7a2949e38dfe3244c4df21f0e1c27bce8aedd6c604a587dd44fc21017cb4/torch-2.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:0d06b30a9207b7c3516a9e0102114024755a07045f0c1d2f2a56b1819ac06bcb", size = 110973074, upload-time = "2025-11-12T15:21:39.958Z" }, + { url = "https://files.pythonhosted.org/packages/1e/ce/7d251155a783fb2c1bb6837b2b7023c622a2070a0a72726ca1df47e7ea34/torch-2.9.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:52347912d868653e1528b47cafaf79b285b98be3f4f35d5955389b1b95224475", size = 74463887, upload-time = "2025-11-12T15:20:36.611Z" }, +] + +[[package]] +name = "torchmetrics" +version = "1.8.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "lightning-utilities" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "torch" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/2e/48a887a59ecc4a10ce9e8b35b3e3c5cef29d902c4eac143378526e7485cb/torchmetrics-1.8.2.tar.gz", hash = "sha256:cf64a901036bf107f17a524009eea7781c9c5315d130713aeca5747a686fe7a5", size = 580679, upload-time = "2025-09-03T14:00:54.077Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/21/aa0f434434c48490f91b65962b1ce863fdcce63febc166ca9fe9d706c2b6/torchmetrics-1.8.2-py3-none-any.whl", hash = "sha256:08382fd96b923e39e904c4d570f3d49e2cc71ccabd2a94e0f895d1f0dac86242", size = 983161, upload-time = "2025-09-03T14:00:51.921Z" }, +] + +[[package]] +name = "torchvision" +version = "0.24.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pillow" }, + { name = "torch" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/69/30f5f03752aa1a7c23931d2519b31e557f3f10af5089d787cddf3b903ecf/torchvision-0.24.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:056c525dc875f18fe8e9c27079ada166a7b2755cea5a2199b0bc7f1f8364e600", size = 1891436, upload-time = "2025-11-12T15:25:04.3Z" }, + { url = "https://files.pythonhosted.org/packages/0c/69/49aae86edb75fe16460b59a191fcc0f568c2378f780bb063850db0fe007a/torchvision-0.24.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:1e39619de698e2821d71976c92c8a9e50cdfd1e993507dfb340f2688bfdd8283", size = 2387757, upload-time = "2025-11-12T15:25:06.795Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/c9/1dfc3db98797b326f1d0c3f3bb61c83b167a813fc7eab6fcd2edb8c7eb9d/torchvision-0.24.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a0f106663e60332aa4fcb1ca2159ef8c3f2ed266b0e6df88de261048a840e0df", size = 8047682, upload-time = "2025-11-12T15:25:21.125Z" }, + { url = "https://files.pythonhosted.org/packages/fa/bb/cfc6a6f6ccc84a534ed1fdf029ae5716dd6ff04e57ed9dc2dab38bf652d5/torchvision-0.24.1-cp311-cp311-win_amd64.whl", hash = "sha256:a9308cdd37d8a42e14a3e7fd9d271830c7fecb150dd929b642f3c1460514599a", size = 4037588, upload-time = "2025-11-12T15:25:14.402Z" }, +] + +[[package]] +name = "tornado" +version = "6.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/ce/1eb500eae19f4648281bb2186927bb062d2438c2e5093d1360391afd2f90/tornado-6.5.2.tar.gz", hash = "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0", size = 510821, upload-time = "2025-08-08T18:27:00.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/48/6a7529df2c9cc12efd2e8f5dd219516184d703b34c06786809670df5b3bd/tornado-6.5.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6", size = 442563, upload-time = "2025-08-08T18:26:42.945Z" }, + { url = "https://files.pythonhosted.org/packages/f2/b5/9b575a0ed3e50b00c40b08cbce82eb618229091d09f6d14bce80fc01cb0b/tornado-6.5.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef", size = 440729, upload-time = "2025-08-08T18:26:44.473Z" }, + { url = "https://files.pythonhosted.org/packages/1b/4e/619174f52b120efcf23633c817fd3fed867c30bff785e2cd5a53a70e483c/tornado-6.5.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e", size = 444295, upload-time = "2025-08-08T18:26:46.021Z" }, + { url = "https://files.pythonhosted.org/packages/95/fa/87b41709552bbd393c85dd18e4e3499dcd8983f66e7972926db8d96aa065/tornado-6.5.2-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882", size = 443644, upload-time = "2025-08-08T18:26:47.625Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/fb15f06e33d7430ca89420283a8762a4e6b8025b800ea51796ab5e6d9559/tornado-6.5.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108", size = 443878, upload-time = "2025-08-08T18:26:50.599Z" }, + { url = "https://files.pythonhosted.org/packages/11/92/fe6d57da897776ad2e01e279170ea8ae726755b045fe5ac73b75357a5a3f/tornado-6.5.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c", size = 444549, upload-time = "2025-08-08T18:26:51.864Z" }, + { url = "https://files.pythonhosted.org/packages/9b/02/c8f4f6c9204526daf3d760f4aa555a7a33ad0e60843eac025ccfd6ff4a93/tornado-6.5.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4", size = 443973, upload-time = "2025-08-08T18:26:53.625Z" }, + { url = "https://files.pythonhosted.org/packages/ae/2d/f5f5707b655ce2317190183868cd0f6822a1121b4baeae509ceb9590d0bd/tornado-6.5.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04", size = 443954, upload-time = "2025-08-08T18:26:55.072Z" }, + { url = "https://files.pythonhosted.org/packages/e8/59/593bd0f40f7355806bf6573b47b8c22f8e1374c9b6fd03114bd6b7a3dcfd/tornado-6.5.2-cp39-abi3-win32.whl", hash = "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0", size = 445023, upload-time = "2025-08-08T18:26:56.677Z" }, + { url = "https://files.pythonhosted.org/packages/c7/2a/f609b420c2f564a748a2d80ebfb2ee02a73ca80223af712fca591386cafb/tornado-6.5.2-cp39-abi3-win_amd64.whl", hash = "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f", size = 445427, upload-time = "2025-08-08T18:26:57.91Z" }, + { url = "https://files.pythonhosted.org/packages/5e/4f/e1f65e8f8c76d73658b33d33b81eed4322fb5085350e4328d5c956f0c8f9/tornado-6.5.2-cp39-abi3-win_arm64.whl", hash = "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af", size = 444456, upload-time = "2025-08-08T18:26:59.207Z" }, +] + +[[package]] +name = "tqdm" +version = "4.67.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621, upload-time = "2024-04-19T11:11:49.746Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" }, +] + +[[package]] +name = "trame" +version = "3.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, + { name = "trame-client" }, + { name = "trame-common" }, + { name = "trame-server" }, + { name = "wslink" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/35/ac/ebd44ac237841d131314e41e0b1654926b77517b0553d7a7f4227778db07/trame-3.12.0.tar.gz", hash = "sha256:88b861162cb8b025e84e93f17dcfd43a84d02d2c1608c9f6d58e3cd646a50c05", size = 23493, upload-time = "2025-08-18T20:21:40.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/15/5869b2c7556fce52306b6b65b06ec7c088f063b865cdfa75ad30bc229b7c/trame-3.12.0-py3-none-any.whl", hash = "sha256:9b33020625e0d1710d060c0fabe7b3be0e31b5e5138439ec9a796faf6fe96915", size = 28516, upload-time = "2025-08-18T20:21:39.037Z" }, +] + +[[package]] +name = "trame-client" +version = "3.11.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "trame-common" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ae/a5/febe01d66c7524882c5f4f3e75affbf112896b660a8a53ddc505eeaa57f7/trame_client-3.11.2.tar.gz", hash = "sha256:98b3f09d0fbdb09cd29eac61c945a76dcad4a08cfb4843abce5a148fd6fc7316", size = 240877, upload-time = "2025-10-08T16:09:56.837Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/3b/defac846c42dfaf27090d8294fbda0cbbeeb2e80b4b651841cc0e8f35d34/trame_client-3.11.2-py3-none-any.whl", hash = "sha256:f4d9364ea89cdb9d128fcebe4ab5034e5c20662feb5fee858cfe2eca3dea4771", size = 245066, upload-time = "2025-10-08T16:09:55.25Z" }, +] + +[[package]] +name = "trame-common" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9b/81/fca0fde4ce06d6afafacfe44fc3989a7d89982a4a83c3252fe3679ecaaeb/trame_common-1.0.1.tar.gz", hash = "sha256:9d4af2d9a6d08a7405977f459931cab9d4b53ae120a80264545f679d1f9a94bc", size = 18375, upload-time = "2025-07-27T04:45:24.794Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/9a/e77b16b3f13d87afd2c741dbee0e3affe859f0f2ac5641da8ffe5de8748f/trame_common-1.0.1-py3-none-any.whl", hash = "sha256:b8f568a6f917a511f9e24060e20b8a95860729b10307ac04d2336d6780e3a2e6", size = 21687, upload-time = "2025-07-27T04:45:23.323Z" }, +] + +[[package]] +name = "trame-server" +version = "3.8.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "more-itertools" }, + { name = "wslink" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bc/94/7e536b0bada379962268499e2051f5ce49ad1c2b2fd9ac58ae78a63b5f55/trame_server-3.8.1.tar.gz", hash = "sha256:88a2ab18d48664df903f2ef2c93734b96aa0ff51170b93aa3b218be80b70529a", size = 39422, upload-time = "2025-10-30T16:46:01.649Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/14/d99c5a66cef93c4f8f149cd6781dd5c30472bc9350c28a9cee31a1b7d7c8/trame_server-3.8.1-py3-none-any.whl", hash = "sha256:b2a461432c36fcfc0f2d1805a7e766fff59b18155b5c6a1a43e1ab112c5eb2f3", size = 44160, upload-time = "2025-10-30T16:45:59.957Z" }, +] + +[[package]] +name = "trame-vtk" +version = "2.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "trame-client" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/77/2c/8bb4f9f04b43a1343239839f0e4049b981e7e7c278a2ec8fb97037259dfe/trame_vtk-2.10.0.tar.gz", hash = "sha256:0e4cabd78c1e8b67da857ba5c3a404a2195cb3e849a252bae51575291bef01ad", size = 758700, upload-time = "2025-10-02T19:15:11.56Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1b/e4/c1b50fddaaf546b342f7e8f4cbd78a9e9e5d89249b2d29c55c9cec767347/trame_vtk-2.10.0-py3-none-any.whl", hash = "sha256:455e4b83f401cfe493a1f54fa06844ad53b7f4052123cd4c896e98411db427c8", size = 780831, upload-time = "2025-10-02T19:15:09.648Z" }, +] + +[[package]] +name = "trame-vuetify" +version = "3.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "trame-client" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/02/9a/a74decfd43ff7dedc2b3a32417ac88d1f105a2415d13ca85645b66bed144/trame_vuetify-3.1.0.tar.gz", hash = "sha256:be7d53da7fd755ce0bf74ecd52b9dc324c3ceac1a60442f8d371ecf9c9abe046", size = 5070287, upload-time = "2025-09-25T14:59:30.52Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/4c/4cc9ab46f231997dbcc6a4f05bb9fe80e842d52ac16ca963fb6747531560/trame_vuetify-3.1.0-py3-none-any.whl", hash = "sha256:1700c993c526aa6095a1bba62431dc5c95f2c302d19a6422a2cd5aab74ec5e77", size = 
5097605, upload-time = "2025-09-25T14:59:28.466Z" }, +] + +[[package]] +name = "triton" +version = "3.5.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b0/72/ec90c3519eaf168f22cb1757ad412f3a2add4782ad3a92861c9ad135d886/triton-3.5.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:61413522a48add32302353fdbaaf92daaaab06f6b5e3229940d21b5207f47579", size = 170425802, upload-time = "2025-11-11T17:40:53.209Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "tzdata" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, +] + +[[package]] +name = "uncertainties" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/0c/cb09f33b26955399c675ab378e4063ed7e48422d3d49f96219ab0be5eba9/uncertainties-3.2.3.tar.gz", hash = "sha256:76a5653e686f617a42922d546a239e9efce72e6b35411b7750a1d12dcba03031", size = 160492, upload-time = "2025-04-21T19:58:28.63Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/5e/f1e1dd319e35e962a4e00b33150a8868b6329cc1d19fd533436ba5488f09/uncertainties-3.2.3-py3-none-any.whl", hash = "sha256:313353900d8f88b283c9bad81e7d2b2d3d4bcc330cbace35403faaed7e78890a", size = 60118, upload-time = "2025-04-21T19:58:26.864Z" }, +] + +[[package]] +name = "uri-template" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/31/c7/0336f2bd0bcbada6ccef7aaa25e443c118a704f828a0620c6fa0207c1b64/uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7", size = 21678, upload-time = "2023-06-21T01:49:05.374Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/00/3fca040d7cf8a32776d3d81a00c8ee7457e00f80c649f1e4a863c8321ae9/uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363", size = 11140, upload-time = "2023-06-21T01:49:03.467Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] + +[[package]] +name = "vtk" +version = "9.5.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "matplotlib" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/8e/c8a4dee522ad0436c846f0f62444a2699cc0d72b2554aa09bcfcf2c01f29/vtk-9.5.2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:afcbc6dc122ebba877793940fda8fd2cbe14e1dae590e6872ea74894abdab9be", size = 86865360, upload-time = "2025-09-18T00:56:33.18Z" }, + { url = "https://files.pythonhosted.org/packages/39/d5/ec52c2cea957221d5e41ebe1768c78d79c714f1e0e635cc4a545cab69d31/vtk-9.5.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:005877a568b96cf00ceb5bec268cf102db756bed509cb240fa40ada414a24bf0", size = 80550040, upload-time = "2025-09-18T00:56:37.337Z" }, + { url = "https://files.pythonhosted.org/packages/b9/00/73a2e3548eae8c122569ae40e947166620cf192ec9a46c6be94e04597dc3/vtk-9.5.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2e2fe2535483adb1ba8cc83a0dc296faaffa2505808a3b04f697084f656e5f84", size = 112239648, upload-time = "2025-09-18T00:56:42.591Z" }, + { url = "https://files.pythonhosted.org/packages/4e/fb/9a88a43be9deccdfb04228a059aa6e7e0416b96223436cf040bc627b5a1d/vtk-9.5.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:0248aab2ee51a69fadcdcf74697a045e2d525009a35296100eed2211f0cca2bb", size = 103718432, upload-time = "2025-09-18T00:56:49.733Z" }, + { url = "https://files.pythonhosted.org/packages/3a/f0/2f499af38d5b30f0ee80f644ef16be3be739f475d19fbdf518ab622dfb88/vtk-9.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:f78674fd265022499ea6b7f03d7f11a861e89e1df043592a82e4f5235c537ef5", size = 63914667, upload-time = "2025-09-18T00:56:53.795Z" }, +] + +[[package]] +name = "wcwidth" +version = "0.2.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" }, +] + +[[package]] +name = "webcolors" +version = "25.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/7a/eb316761ec35664ea5174709a68bbd3389de60d4a1ebab8808bfc264ed67/webcolors-25.10.0.tar.gz", hash = "sha256:62abae86504f66d0f6364c2a8520de4a0c47b80c03fc3a5f1815fedbef7c19bf", size = 53491, upload-time = "2025-10-31T07:51:03.977Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e2/cc/e097523dd85c9cf5d354f78310927f1656c422bd7b2613b2db3e3f9a0f2c/webcolors-25.10.0-py3-none-any.whl", hash = "sha256:032c727334856fc0b968f63daa252a1ac93d33db2f5267756623c210e57a4f1d", size = 14905, upload-time = "2025-10-31T07:51:01.778Z" }, +] + +[[package]] +name = "webencodings" +version = "0.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/02/ae6ceac1baeda530866a85075641cec12989bd8d31af6d5ab4a3e8c92f47/webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923", size = 9721, upload-time = "2017-04-05T20:21:34.189Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774, upload-time = "2017-04-05T20:21:32.581Z" }, +] + +[[package]] +name = "websocket-client" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/41/aa4bf9664e4cda14c3b39865b12251e8e7d239f4cd0e3cc1b6c2ccde25c1/websocket_client-1.9.0.tar.gz", hash = "sha256:9e813624b6eb619999a97dc7958469217c3176312b3a16a4bd1bc7e08a46ec98", size = 70576, upload-time = "2025-10-07T21:16:36.495Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/db/b10e48aa8fff7407e67470363eac595018441cf32d5e1001567a7aeba5d2/websocket_client-1.9.0-py3-none-any.whl", hash = "sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef", size = 82616, upload-time = "2025-10-07T21:16:34.951Z" }, +] + +[[package]] +name = "werkzeug" +version = "3.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/69/83029f1f6300c5fb2471d621ab06f6ec6b3324685a2ce0f9777fd4a8b71e/werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746", size = 806925, upload-time = "2024-11-08T15:52:18.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498, upload-time = "2024-11-08T15:52:16.132Z" }, +] + +[[package]] +name = "widgetsnbextension" +version = "4.0.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/f4/c67440c7fb409a71b7404b7aefcd7569a9c0d6bd071299bf4198ae7a5d95/widgetsnbextension-4.0.15.tar.gz", hash = "sha256:de8610639996f1567952d763a5a41af8af37f2575a41f9852a38f947eb82a3b9", size = 1097402, upload-time = "2025-11-01T21:15:55.178Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/0e/fa3b193432cfc60c93b42f3be03365f5f909d2b3ea410295cf36df739e31/widgetsnbextension-4.0.15-py3-none-any.whl", hash = "sha256:8156704e4346a571d9ce73b84bee86a29906c9abfd7223b7228a28899ccf3366", size = 2196503, upload-time = "2025-11-01T21:15:53.565Z" }, +] + +[[package]] +name = "wslink" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "msgpack" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/28/7c7cf32d544f464b58f14b8b9da2acfd382729c8c2074abe0dfe671361fc/wslink-2.5.0.tar.gz", hash = 
"sha256:61f79460affeeeb05284821f5ec5bc927153d587b661d6cfe33cbe260f9cfae3", size = 31996, upload-time = "2025-10-20T15:38:53.161Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/33/44baf508511b036e455693cd874088f622b8f68e61415e92581a3ffdbbee/wslink-2.5.0-py3-none-any.whl", hash = "sha256:e5738958cc6cbe95581108df066be31a9ead0c485d2b27ca3f3f4865fc08b761", size = 36916, upload-time = "2025-10-20T15:38:52.148Z" }, +] + +[[package]] +name = "xrayutilities" +version = "1.7.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "h5py" }, + { name = "lmfit" }, + { name = "numpy" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ea/28/6e0c248da04ed940ad112007bdb8f6e200a9b3889f11971adbc49bdf6de3/xrayutilities-1.7.11.tar.gz", hash = "sha256:d1d4604bab86696b7535176d0d9e7b7ab457fd429b1fed59b7dc13043d1cb1f2", size = 9124920, upload-time = "2025-11-11T16:45:57.091Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/06/05c0feb51112aea9988f5deb491ac4b3a24ed4534ad354a3c00efb1ba66c/xrayutilities-1.7.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6216bdafe6bc051c030eb75695fc4caa67dd58161f83e6aaebfded27a2e8c251", size = 8431055, upload-time = "2025-11-11T16:45:31.405Z" }, + { url = "https://files.pythonhosted.org/packages/f6/4c/cf3fad034f48934f1c06b9f09c7933f8bf9e0f5c4140ad5351905b3be270/xrayutilities-1.7.11-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:803971f6ba50dcb222cb0f5b869f2aacc10151c43d720ff645a8403ca08f60d0", size = 8556190, upload-time = "2025-11-11T16:45:33.346Z" }, + { url = "https://files.pythonhosted.org/packages/7b/a2/b9cf10014be919ee8facc1db4cd802b935e04476671ce46aad3445d86cac/xrayutilities-1.7.11-cp311-cp311-win_amd64.whl", hash = "sha256:55fd9795f1b4a6d805e24fccf239b9aae4f91c9f8d73cd8e00ca2c707c2927d1", size = 8458491, upload-time = "2025-11-11T16:45:35.624Z" }, +] + +[[package]] +name = "yarl" +version = "1.22.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/57/63/0c6ebca57330cd313f6102b16dd57ffaf3ec4c83403dcb45dbd15c6f3ea1/yarl-1.22.0.tar.gz", hash = "sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71", size = 187169, upload-time = "2025-10-06T14:12:55.963Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/27/5ab13fc84c76a0250afd3d26d5936349a35be56ce5785447d6c423b26d92/yarl-1.22.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ab72135b1f2db3fed3997d7e7dc1b80573c67138023852b6efb336a5eae6511", size = 141607, upload-time = "2025-10-06T14:09:16.298Z" }, + { url = "https://files.pythonhosted.org/packages/6a/a1/d065d51d02dc02ce81501d476b9ed2229d9a990818332242a882d5d60340/yarl-1.22.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:669930400e375570189492dc8d8341301578e8493aec04aebc20d4717f899dd6", size = 94027, upload-time = "2025-10-06T14:09:17.786Z" }, + { url = "https://files.pythonhosted.org/packages/c1/da/8da9f6a53f67b5106ffe902c6fa0164e10398d4e150d85838b82f424072a/yarl-1.22.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:792a2af6d58177ef7c19cbf0097aba92ca1b9cb3ffdd9c7470e156c8f9b5e028", size = 94963, upload-time = "2025-10-06T14:09:19.662Z" }, + { url = 
"https://files.pythonhosted.org/packages/68/fe/2c1f674960c376e29cb0bec1249b117d11738db92a6ccc4a530b972648db/yarl-1.22.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ea66b1c11c9150f1372f69afb6b8116f2dd7286f38e14ea71a44eee9ec51b9d", size = 368406, upload-time = "2025-10-06T14:09:21.402Z" }, + { url = "https://files.pythonhosted.org/packages/95/26/812a540e1c3c6418fec60e9bbd38e871eaba9545e94fa5eff8f4a8e28e1e/yarl-1.22.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3e2daa88dc91870215961e96a039ec73e4937da13cf77ce17f9cad0c18df3503", size = 336581, upload-time = "2025-10-06T14:09:22.98Z" }, + { url = "https://files.pythonhosted.org/packages/0b/f5/5777b19e26fdf98563985e481f8be3d8a39f8734147a6ebf459d0dab5a6b/yarl-1.22.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba440ae430c00eee41509353628600212112cd5018d5def7e9b05ea7ac34eb65", size = 388924, upload-time = "2025-10-06T14:09:24.655Z" }, + { url = "https://files.pythonhosted.org/packages/86/08/24bd2477bd59c0bbd994fe1d93b126e0472e4e3df5a96a277b0a55309e89/yarl-1.22.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e6438cc8f23a9c1478633d216b16104a586b9761db62bfacb6425bac0a36679e", size = 392890, upload-time = "2025-10-06T14:09:26.617Z" }, + { url = "https://files.pythonhosted.org/packages/46/00/71b90ed48e895667ecfb1eaab27c1523ee2fa217433ed77a73b13205ca4b/yarl-1.22.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c52a6e78aef5cf47a98ef8e934755abf53953379b7d53e68b15ff4420e6683d", size = 365819, upload-time = "2025-10-06T14:09:28.544Z" }, + { url = "https://files.pythonhosted.org/packages/30/2d/f715501cae832651d3282387c6a9236cd26bd00d0ff1e404b3dc52447884/yarl-1.22.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3b06bcadaac49c70f4c88af4ffcfbe3dc155aab3163e75777818092478bcbbe7", size = 363601, upload-time = "2025-10-06T14:09:30.568Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f9/a678c992d78e394e7126ee0b0e4e71bd2775e4334d00a9278c06a6cce96a/yarl-1.22.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:6944b2dc72c4d7f7052683487e3677456050ff77fcf5e6204e98caf785ad1967", size = 358072, upload-time = "2025-10-06T14:09:32.528Z" }, + { url = "https://files.pythonhosted.org/packages/2c/d1/b49454411a60edb6fefdcad4f8e6dbba7d8019e3a508a1c5836cba6d0781/yarl-1.22.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d5372ca1df0f91a86b047d1277c2aaf1edb32d78bbcefffc81b40ffd18f027ed", size = 385311, upload-time = "2025-10-06T14:09:34.634Z" }, + { url = "https://files.pythonhosted.org/packages/87/e5/40d7a94debb8448c7771a916d1861d6609dddf7958dc381117e7ba36d9e8/yarl-1.22.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:51af598701f5299012b8416486b40fceef8c26fc87dc6d7d1f6fc30609ea0aa6", size = 381094, upload-time = "2025-10-06T14:09:36.268Z" }, + { url = "https://files.pythonhosted.org/packages/35/d8/611cc282502381ad855448643e1ad0538957fc82ae83dfe7762c14069e14/yarl-1.22.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b266bd01fedeffeeac01a79ae181719ff848a5a13ce10075adbefc8f1daee70e", size = 370944, upload-time = "2025-10-06T14:09:37.872Z" }, + { url = "https://files.pythonhosted.org/packages/2d/df/fadd00fb1c90e1a5a8bd731fa3d3de2e165e5a3666a095b04e31b04d9cb6/yarl-1.22.0-cp311-cp311-win32.whl", hash = "sha256:a9b1ba5610a4e20f655258d5a1fdc7ebe3d837bb0e45b581398b99eb98b1f5ca", size 
= 81804, upload-time = "2025-10-06T14:09:39.359Z" }, + { url = "https://files.pythonhosted.org/packages/b5/f7/149bb6f45f267cb5c074ac40c01c6b3ea6d8a620d34b337f6321928a1b4d/yarl-1.22.0-cp311-cp311-win_amd64.whl", hash = "sha256:078278b9b0b11568937d9509b589ee83ef98ed6d561dfe2020e24a9fd08eaa2b", size = 86858, upload-time = "2025-10-06T14:09:41.068Z" }, + { url = "https://files.pythonhosted.org/packages/2b/13/88b78b93ad3f2f0b78e13bfaaa24d11cbc746e93fe76d8c06bf139615646/yarl-1.22.0-cp311-cp311-win_arm64.whl", hash = "sha256:b6a6f620cfe13ccec221fa312139135166e47ae169f8253f72a0abc0dae94376", size = 81637, upload-time = "2025-10-06T14:09:42.712Z" }, + { url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] diff --git a/viewer/TEST_pyvista_plot.py b/viewer/TEST_pyvista_plot.py deleted file mode 100644 index a9f691f..0000000 --- a/viewer/TEST_pyvista_plot.py +++ /dev/null @@ -1,38 +0,0 @@ -import pyvista as pv -import numpy as np - -# Set Plot Theme -pv.set_plot_theme('dark') - -# Load data -x = np.load('qx.npy') -y = np.load('qy.npy') -z = np.load('qz.npy') -intensity = np.load('intensity.npy') - -# Normalize intensity to [0, 1] for colormap -intensity_norm = (intensity - np.min(intensity)) / (np.max(intensity) - np.min(intensity)) - -# Generate points -points = np.column_stack((x, y, z)) -cloud = pv.PolyData(points) - -# Apply colors -cloud['intensity'] = intensity # Store intensity for scalar mapping - -# Create plotter -plotter = pv.Plotter() -plotter.add_mesh( - cloud, - scalars='intensity', # Use intensity for the scalar bar - cmap='viridis', - show_scalar_bar=True, # Enable scalar bar - opacity=intensity_norm, # Set transparency based on intensity - # render_points_as_spheres=True, - # point_size=10 - -) - -plotter.show_bounds(xtitle='H Axis', ytitle='K Axis', ztitle='L Axis') -plotter.add_axes(xlabel='H', ylabel='K', zlabel='L') -plotter.show() diff --git a/viewer/analysis_window.py b/viewer/analysis_window.py index 73ab712..0f2a9a9 100755 --- a/viewer/analysis_window.py +++ b/viewer/analysis_window.py @@ -84,9 +84,9 @@ class AnalysisWindow(QMainWindow): view_comx (pyqtgraph.ImageView): Image view for x center-of-mass if consumer type is vectorized. view_comy (pyqtgraph.ImageView): Image view for y center-of-mass if consumer type is vectorized. view_intensity (pyqtgraph.ImageView): Image view for intensity if consumer type is vectorized. - plot_comx (pg.PlotWidget): Plot widget for x center-of-mass if consumer type is spontaneous. - plot_comy (pg.PlotWidget): Plot widget for y center-of-mass if consumer type is spontaneous. 
- plot_intensity (pg.PlotWidget): Plot widget for intensity if consumer type is spontaneous. + plot_comx (pg.PlotWidget): Plot widget for x center-of-mass if consumer type is continuous. + plot_comy (pg.PlotWidget): Plot widget for y center-of-mass if consumer type is continuous. + plot_intensity (pg.PlotWidget): Plot widget for intensity if consumer type is continuous. update_counter (int): Counter for updates to plotting data. max_updates (int): Maximum number of updates allowed. analysis_index (int): Index for identifying analysis data from metadata. @@ -107,7 +107,7 @@ def __init__(self, parent): self.setWindowTitle('Analysis Window') # TODO: load config separately using the filepath provided by the parent self.config: dict = self.parent.reader.config - self.consumer_type = self.config.get("CONSUMER_TYPE", "") + self.consumer_mode = self.config.get("CONSUMER_MODE", "") self.xpos_path = None self.ypos_path = None self.save_path = None @@ -124,7 +124,7 @@ def __init__(self, parent): self.max_updates = 10 self.analysis_index = self.parent.reader.analysis_index if self.analysis_index is not None: - self.analysis_attributes: dict = self.parent.reader.attributes[self.analysis_index] if self.consumer_type == "vectorized" else self.parent.reader.analysis_cache_dict + self.analysis_attributes: dict = self.parent.reader.attributes[self.analysis_index] if self.consumer_mode == "vectorized" else self.parent.reader.analysis_cache_dict else: self.analysis_attributes = {} @@ -137,7 +137,7 @@ def __init__(self, parent): # self.btn_create_hdf5.clicked.connect(self.save_hdf5) self.calc_freq.valueChanged.connect(self.frequency_changed) - self.cbox_select_roi.activated.connect(self.roi_selection_changed) + # self.cbox_select_roi.activated.connect(self.roi_selection_changed) self.chk_freeze.stateChanged.connect(self.freeze_plotting_checked) self.btn_reset.clicked.connect(self.reset_plot) self.sbox_intensity_min.valueChanged.connect(self.min_max_changed) @@ -152,9 +152,9 @@ def configure_plots(self) -> None: Configures the plotting interface based on the consumer type. """ # cmap = pg.colormap.getFromMatplotlib('viridis') - if self.consumer_type == "spontaneous": + if self.consumer_mode == "continuous": self.init_scatter_plot() - elif self.consumer_type == "vectorized": + elif self.consumer_mode == "vectorized": self.init_image_view() else: #TODO: replace with w/ a message box @@ -183,7 +183,7 @@ def reset_plot(self) -> None: This function resets the plot and clears all caches when the reset button is clicked. """ # self.status_text.setText("Waiting for the first scan...") - if self.consumer_type == "vectorized": + if self.consumer_mode == "vectorized": self.view_intensity.clear() self.view_comx.clear() self.view_comy.clear() @@ -212,26 +212,26 @@ def check_num_rois(self) -> None: for i in range(num_rois): self.cbox_select_roi.addItem(f'ROI{i+1}') - def roi_selection_changed(self) -> None: - """ - Updates the ROI selection based on the user's choice in the dropdown. 
- """ - text = self.cbox_select_roi.currentText() - if text.startswith('ROI'): - x = self.parent.reader.metadata[f"{self.parent.reader.pva_prefix}:{text}:MinX"] - y = self.parent.reader.metadata[f"{self.parent.reader.pva_prefix}:{text}:MinY"] - w= self.parent.reader.metadata[f"{self.parent.reader.pva_prefix}:{text}:SizeX"] - h = self.parent.reader.metadata[f"{self.parent.reader.pva_prefix}:{text}:SizeY"] - #change the roi values being analyzed - self.parent.roi_x = x - self.parent.roi_y = y - self.parent.roi_width = w - self.parent.roi_height = h - # Make changes seen in the text boxes - self.roi_x.setValue(x) - self.roi_y.setValue(y) - self.roi_width.setValue(w) - self.roi_height.setValue(h) + # def roi_selection_changed(self) -> None: + # """ + # Updates the ROI selection based on the user's choice in the dropdown. + # """ + # text = self.cbox_select_roi.currentText() + # if text.startswith('ROI'): + # x = self.parent.reader.metadata[f"{self.parent.reader.pva_prefix}:{text}:MinX"] + # y = self.parent.reader.metadata[f"{self.parent.reader.pva_prefix}:{text}:MinY"] + # w= self.parent.reader.metadata[f"{self.parent.reader.pva_prefix}:{text}:SizeX"] + # h = self.parent.reader.metadata[f"{self.parent.reader.pva_prefix}:{text}:SizeY"] + # #change the roi values being analyzed + # self.parent.roi_x = x + # self.parent.roi_y = y + # self.parent.roi_width = w + # self.parent.roi_height = h + # # Make changes seen in the text boxes + # self.roi_x.setValue(x) + # self.roi_y.setValue(y) + # self.roi_width.setValue(w) + # self.roi_height.setValue(h) # def roi_boxes_changed(self) -> None: # """ @@ -262,9 +262,9 @@ def min_max_changed(self) -> None: self.min_comy = self.sbox_comy_min.value() self.max_comy = self.sbox_comy_max.value() - if self.config['CONSUMER_TYPE'] == 'spontaneous': + if self.config['CONSUMER_MODE'] == 'continuous': self.plot_images() - if self.config['CONSUMER_TYPE'] == 'vectorized': + if self.config['CONSUMER_MODE'] == 'vectorized': self.view_intensity.setLevels(self.min_intensity, self.max_intensity) self.view_comx.setLevels(self.min_comx, self.max_comx) self.view_comy.setLevels(self.min_comy, self.max_comy) @@ -298,9 +298,9 @@ def update_vectorized_image(self, intensity, com_x, com_y) -> None: self.view_comy.setImage(img=com_y_matrix.T, autoRange=False, autoLevels=False, autoHistogramRange=False) - def update_spontaneous_image(self, intensity, com_x, com_y, position) -> None: + def update_continuous_image(self, intensity, com_x, com_y, position) -> None: """ - Updates the scatter plots for spontaneous data. + Updates the scatter plots for continuous data. Args: intensity (list): List of intensity values. 
@@ -346,12 +346,12 @@ def plot_images(self) -> None: self.update_counter += 1 # TODO: test if this line can be assigned once and use update on its own - if self.consumer_type == "vectorized": + if self.consumer_mode == "vectorized": self.analysis_attributes = self.parent.reader.attributes[self.analysis_index] intensity = self.analysis_attributes["value"][0]["value"].get("Intensity",0.0) com_x = self.analysis_attributes["value"][0]["value"].get("ComX",0.0) com_y = self.analysis_attributes["value"][0]["value"].get("ComY",0.0) - elif self.consumer_type == "spontaneous": + elif self.consumer_mode == "continuous": intensity = list(self.analysis_attributes["Intensity"].values()) com_x = list(self.analysis_attributes["ComX"].values()) com_y = list(self.analysis_attributes["ComY"].values()) @@ -372,15 +372,15 @@ def plot_images(self) -> None: self.sbox_comy_max.setValue(self.max_comy) # print(intensity) - if self.consumer_type == "vectorized": + if self.consumer_mode == "vectorized": self.update_vectorized_image(intensity=intensity, com_x=com_x, com_y=com_y,) - elif self.consumer_type == "spontaneous": - self.update_spontaneous_image(intensity=intensity, com_x=com_x, com_y=com_y, position=position) + elif self.consumer_mode == "continuous": + self.update_continuous_image(intensity=intensity, com_x=com_x, com_y=com_y, position=position) def init_scatter_plot(self) -> None: """ Initializes scatter plots for intensity, com_x, and com_y data. - called only if consumer type is spontaneous + called only if consumer type is continuous """ self.scatter_item_intensity = pg.ScatterPlotItem() self.scatter_item_comx = pg.ScatterPlotItem() @@ -433,8 +433,6 @@ def init_image_view(self) -> None: self.view_comy.view.getAxis('left').setLabel('Scan Position Rows') self.view_comy.view.getAxis('bottom').setLabel('Scan Position Cols') - - def closeEvent(self, event): """ Handles cleanup operations when the analysis window is closed. diff --git a/viewer/area_det_viewer.py b/viewer/area_det_viewer.py index a4d9c88..305fb04 100755 --- a/viewer/area_det_viewer.py +++ b/viewer/area_det_viewer.py @@ -1,5 +1,6 @@ import os import sys +import time import subprocess import numpy as np import os.path as osp @@ -7,18 +8,20 @@ import xrayutilities as xu from PyQt5 import uic # from epics import caget +from epics import PV, pv from epics import camonitor, caget -from PyQt5.QtCore import QTimer -from PyQt5.QtWidgets import QApplication, QMainWindow, QDialog, QFileDialog +from PyQt5.QtCore import Qt, QTimer, QThread, pyqtSignal +from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog, QFileDialog, QSlider # Custom imported classes -from generators import rotation_cycle -from pva_reader import PVAReader from roi_stats_dialog import RoiStatsDialog -from pv_setup_dialog import PVSetupDialog from analysis_window import AnalysisWindow +import pathlib +sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) +from utils import rotation_cycle +from utils import PVAReader, HDF5Writer +# from ..utils.size_manager import SizeManager -max_cache_size = 900 #TODO: Put this in the config file rot_gen = rotation_cycle(1,5) @@ -31,21 +34,20 @@ def __init__(self): Attributes: input_channel (str): Input channel for PVA. - roi_config (str): Path to the ROI configuration file. + config_path (str): Path to the ROI configuration file. 
""" super(ConfigDialog,self).__init__() uic.loadUi('gui/pv_config.ui', self) self.setWindowTitle('PV Config') # initializing variables to pass to Image Viewer - self.input_channel = "" - self.roi_config = "" + self.input_channel = '' + self.config_path = '' # class can be prefilled with text self.init_ui() # Connecting signasl to - # self.btn_edit.clicked.connect(self.json_open_file_dialog) + self.btn_clear.clicked.connect(self.clear_pv_setup) self.btn_browse.clicked.connect(self.browse_file_dialog) - self.btn_create.clicked.connect(self.new_pv_setup) self.btn_accept_reject.accepted.connect(self.dialog_accepted) def init_ui(self) -> None: @@ -53,7 +55,7 @@ def init_ui(self) -> None: Prefills text in the Line Editors for the user. """ self.le_input_channel.setText(self.le_input_channel.text()) - self.le_roi_config.setText(self.le_roi_config.text()) + self.le_config.setText(self.le_config.text()) def browse_file_dialog(self) -> None: """ @@ -61,33 +63,24 @@ def browse_file_dialog(self) -> None: """ self.pvs_path, _ = QFileDialog.getOpenFileName(self, 'Select TOML Config', 'pv_configs', '*.toml (*.toml)') - self.le_roi_config.setText(self.pvs_path) + self.le_config.setText(self.pvs_path) - def new_pv_setup(self) -> None: + def clear_pv_setup(self) -> None: """ - Opens a new window for setting up a new PV configuration within the UI. + Clears line edit that tells image view where the config file is. """ - self.new_pv_setup_dialog = PVSetupDialog(parent=self, file_mode='w', path=None) - - def edit_pv_setup(self) -> None: - """ - Opens a window for editing an existing PV configuration. - """ - if self.le_edit_file_path.text() != '': - self.edit_pv_setup_dialog = PVSetupDialog(parent=self, file_mode='r+', path=self.pvs_path) - else: - print('file path empty') + self.le_config.clear() def dialog_accepted(self) -> None: """ Handles the final step when the dialog's accept button is pressed. - Starts the ImageWindow process with filled information. + Starts the DiffractionImageWindow process with filled information. """ self.input_channel = self.le_input_channel.text() - self.roi_config = self.le_roi_config.text() - if osp.isfile(self.roi_config) or (self.roi_config == ''): - self.image_viewer = ImageWindow(input_channel=self.input_channel, - file_path=self.roi_config,) + self.config_path = self.le_config.text() + if osp.isfile(self.config_path) or (self.config_path == ''): + self.image_viewer = DiffractionImageWindow(input_channel=self.input_channel, + file_path=self.config_path,) else: print('File Path Doesn\'t Exitst') #TODO: ADD ERROR Dialog rather than print message so message is clearer @@ -95,7 +88,8 @@ def dialog_accepted(self) -> None: self.new_dialog.show() -class ImageWindow(QMainWindow): +class DiffractionImageWindow(QMainWindow): + hkl_data_updated = pyqtSignal(bool) def __init__(self, input_channel='s6lambda1:Pva1:Image', file_path=''): """ @@ -105,9 +99,9 @@ def __init__(self, input_channel='s6lambda1:Pva1:Image', file_path=''): input_channel (str): The PVA input channel for the detector. file_path (str): The file path for loading configuration. 
""" - super(ImageWindow, self).__init__() + super(DiffractionImageWindow, self).__init__() uic.loadUi('gui/imageshow.ui', self) - self.setWindowTitle('Image Viewer with PVAaccess') + self.setWindowTitle('DashPVA') self.show() # Initializing important variables self.reader = None @@ -116,44 +110,51 @@ def __init__(self, input_channel='s6lambda1:Pva1:Image', file_path=''): self.first_plot = True self.image_is_transposed = False self.rot_num = 0 - self.rois = [] - self.stats_dialog = {} + self.mouse_x = 0 + self.mouse_y = 0 + self.rois: list[pg.ROI] = [] + self.stats_dialogs = {} self.stats_data = {} self._input_channel = input_channel self.pv_prefix.setText(self._input_channel) self._file_path = file_path + # Initializing but not starting timers so they can be reached by different functions self.timer_labels = QTimer() self.timer_plot = QTimer() - # self.timer_rsm = QTimer() + self.file_writer_thread = QThread() self.timer_labels.timeout.connect(self.update_labels) self.timer_plot.timeout.connect(self.update_image) self.timer_plot.timeout.connect(self.update_rois) + # For testing ROIs being sent from analysis window self.roi_x = 100 self.roi_y = 200 self.roi_width = 50 self.roi_height = 50 + # HKL values + self.is_hkl_ready = False self.hkl_config = None + self.hkl_pvs = {} self.hkl_data = {} + self.q_conv = None self.qx = None self.qy = None self.qz = None self.processes = {} - # Adding widgets manually to have better control over them plot = pg.PlotItem() self.image_view = pg.ImageView(view=plot) - self.viewer_layout.addWidget(self.image_view,1,1) - self.image_view.view.getAxis('left').setLabel(text='Row [pixels]') - self.image_view.view.getAxis('bottom').setLabel(text='Columns [pixels]') + self.viewer_layout.addWidget(self.image_view,0,1) + self.image_view.view.getAxis('left').setLabel(text='SizeY [pixels]') + self.image_view.view.getAxis('bottom').setLabel(text='SizeX [pixels]') # second is a separate plot to show the horiontal avg of peaks in the image self.horizontal_avg_plot = pg.PlotWidget() self.horizontal_avg_plot.invertY(True) - self.horizontal_avg_plot.setMaximumWidth(175) - self.horizontal_avg_plot.setYLink(self.image_view.getView()) - self.viewer_layout.addWidget(self.horizontal_avg_plot, 1,0) + self.horizontal_avg_plot.setMaximumWidth(200) + self.horizontal_avg_plot.getAxis('bottom').setLabel(text='Horizontal Avg.') + self.viewer_layout.addWidget(self.horizontal_avg_plot, 0,0) # Connecting the signals to the code that will be executed self.pv_prefix.returnPressed.connect(self.start_live_view_clicked) @@ -161,7 +162,7 @@ def __init__(self, input_channel='s6lambda1:Pva1:Image', file_path=''): self.start_live_view.clicked.connect(self.start_live_view_clicked) self.stop_live_view.clicked.connect(self.stop_live_view_clicked) self.btn_analysis_window.clicked.connect(self.open_analysis_window_clicked) - self.btn_hkl_viewer.clicked.connect(self.start_hkl_viewer) + self.btn_hkl_viewer.clicked.connect(self.start_hkl_viewer_clicked) self.btn_Stats1.clicked.connect(self.stats_button_clicked) self.btn_Stats2.clicked.connect(self.stats_button_clicked) self.btn_Stats3.clicked.connect(self.stats_button_clicked) @@ -170,22 +171,25 @@ def __init__(self, input_channel='s6lambda1:Pva1:Image', file_path=''): self.rbtn_C.clicked.connect(self.c_ordering_clicked) self.rbtn_F.clicked.connect(self.f_ordering_clicked) self.rotate90degCCW.clicked.connect(self.rotation_count) + # self.rotate90degCCW.clicked.connect(self.rotate_rois) + self.log_image.clicked.connect(self.update_image) 
self.log_image.clicked.connect(self.reset_first_plot) self.freeze_image.stateChanged.connect(self.freeze_image_checked) self.display_rois.stateChanged.connect(self.show_rois_checked) self.chk_transpose.stateChanged.connect(self.transpose_image_checked) self.plotting_frequency.valueChanged.connect(self.start_timers) - self.log_image.clicked.connect(self.update_image) self.max_setting_val.valueChanged.connect(self.update_min_max_setting) self.min_setting_val.valueChanged.connect(self.update_min_max_setting) self.image_view.getView().scene().sigMouseMoved.connect(self.update_mouse_pos) + self.hkl_data_updated.connect(self.handle_hkl_data_update) def start_timers(self) -> None: """ Starts timers for updating labels and plotting at specified frequencies. """ - self.timer_labels.start(int(1000/100)) - self.timer_plot.start(int(1000/self.plotting_frequency.value())) + if self.reader is not None and self.reader.channel.isMonitorActive(): + self.timer_labels.start(int(1000/100)) + self.timer_plot.start(int(1000/self.plotting_frequency.value())) def stop_timers(self) -> None: """ @@ -201,9 +205,16 @@ def set_pixel_ordering(self) -> None: if self.reader is not None: if self.rbtn_C.isChecked(): self.reader.pixel_ordering = 'C' + self.reader.image_is_transposed = True elif self.rbtn_F.isChecked(): self.reader.pixel_ordering = 'F' - + self.image_is_transposed = False + self.reader.image_is_transposed = False + + def trigger_save_caches(self) -> None: + if not self.file_writer_thread.isRunning(): + self.file_writer_thread.start() + self.file_writer.save_caches_to_h5(clear_caches=True) def c_ordering_clicked(self) -> None: """ @@ -211,6 +222,7 @@ def c_ordering_clicked(self) -> None: """ if self.reader is not None: self.reader.pixel_ordering = 'C' + self.reader.image_is_transposed = True def f_ordering_clicked(self) -> None: """ @@ -218,6 +230,8 @@ def f_ordering_clicked(self) -> None: """ if self.reader is not None: self.reader.pixel_ordering = 'F' + self.image_is_transposed = False + self.reader.image_is_transposed = False def open_analysis_window_clicked(self) -> None: """ @@ -226,7 +240,7 @@ def open_analysis_window_clicked(self) -> None: if self.reader is not None: if self.reader.image is not None: self.analysis_window = AnalysisWindow(parent=self) - self.analysis_window.show() + self.analysis_window.show() def start_live_view_clicked(self) -> None: """ @@ -236,47 +250,72 @@ def start_live_view_clicked(self) -> None: Also starts monitoring the stats and adds ROIs to the viewer. 
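If a reader already exists, its channel monitor and writer thread are shut down and the reader is rebuilt before monitoring restarts.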
""" try: - # A double check to make sure there isn't a connection already when starting + self.stop_timers() + self.image_view.clear() + self.reset_rsm_vars() if self.reader is None: self.reader = PVAReader(input_channel=self._input_channel, config_filepath=self._file_path) - self.set_pixel_ordering() - self.transpose_image_checked() - self.reader.start_channel_monitor() + self.file_writer = HDF5Writer(self.reader.OUTPUT_FILE_LOCATION, self.reader) + self.file_writer.moveToThread(self.file_writer_thread) else: - self.stop_timers() - self.reader.stop_channel_monitor() + if self.reader.channel.isMonitorActive(): + self.reader.stop_channel_monitor() + if self.file_writer_thread.isRunning(): + self.file_writer_thread.quit() + self.file_writer_thread.wait() + for roi in self.rois: + self.image_view.getView().removeItem(roi) + self.rois.clear() + self.btn_save_caches.clicked.disconnect() + # self.reader.reader_scan_complete.disconnect() + self.file_writer.hdf5_writer_finished.disconnect() del self.reader self.reader = PVAReader(input_channel=self._input_channel, config_filepath=self._file_path) - self.set_pixel_ordering() - self.transpose_image_checked() - self.reader.start_channel_monitor() - except: - print(f'Failed to Connect to {self._input_channel}') + self.file_writer.pva_reader = self.reader + # Reconnecting signals + self.reader.reader_scan_complete.connect(self.trigger_save_caches) + self.file_writer.hdf5_writer_finished.connect(self.on_writer_finished) + self.btn_save_caches.clicked.connect(self.save_caches_clicked) + + if self.reader.CACHING_MODE == 'scan': + self.file_writer_thread.start() + elif self.reader.CACHING_MODE == 'bin': + self.slider = QSlider() + self.slider.setRange(0, self.reader.BIN_COUNT-1) + self.slider.setValue(0) + self.slider.setOrientation(Qt.Horizontal) + self.slider.setTickPosition(QSlider.TicksAbove) + self.viewer_layout.addWidget(self.slider, 1, 1) + + self.set_pixel_ordering() + self.transpose_image_checked() + self.reader.start_channel_monitor() + except Exception as e: + print(f'Failed to Connect to {self._input_channel}: {e}') self.image_view.clear() self.horizontal_avg_plot.getPlotItem().clear() + self.reset_rsm_vars() + del self.file_writer del self.reader self.reader = None self.provider_name.setText('N/A') self.is_connected.setText('Disconnected') + self.btn_save_caches.clicked.disconnect() + self.file_writer_thread.terminate() - if self.reader is not None: - if not(self.reader.rois): - if ('ROI' in self.reader.config): + try: + if self.reader is not None: + if not(self.reader.rois): + if 'ROI' in self.reader.config: self.reader.start_roi_backup_monitor() - self.start_stats_monitors() - self.add_rois() - self.start_timers() - try: - self.init_hkl() - if self.hkl_data: - qxyz = self.create_rsm() - self.qx = qxyz[0].T if self.image_is_transposed else qxyz[0] - self.qy = qxyz[1].T if self.image_is_transposed else qxyz[1] - self.qz = qxyz[2].T if self.image_is_transposed else qxyz[2] - except Exception as e: - print('failed to create rsm: %s' % e) + self.start_hkl_monitors() + self.start_stats_monitors() + self.add_rois() + self.start_timers() + except Exception as e: + print(f'[Diffraction Image Viewer] Error Starting Image Viewer {e}') def stop_live_view_clicked(self) -> None: """ @@ -285,15 +324,62 @@ def stop_live_view_clicked(self) -> None: This method also updates the UI to reflect the disconnected state. 
""" if self.reader is not None: - self.reader.stop_channel_monitor() + if self.reader.channel.isMonitorActive(): + self.reader.stop_channel_monitor() self.stop_timers() - for key in self.stats_dialog: - self.stats_dialog[key] = None - del self.reader - self.reader = None + for key in self.stats_dialogs: + self.stats_dialogs[key] = None + # for roi in self.rois: + # self.image_view.getView().removeItem(roi) + for hkl_pv in self.hkl_pvs.values(): + hkl_pv.clear_callbacks() + hkl_pv.disconnect() + self.hkl_pvs = {} + self.hkl_data = {} self.provider_name.setText('N/A') self.is_connected.setText('Disconnected') + def start_hkl_viewer_clicked(self) -> None: + try: + if self.reader is not None and self.reader.HKL_IN_CONFIG: + cmd = ['python', 'viewer/hkl_3d_viewer.py',] + process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + preexec_fn=os.setsid, + universal_newlines=True + ) + self.processes[process.pid] = process + + except Exception as e: + print(f'[Diffraction Image Viewer] Failed to load HKL Viewer:{e}') + + def save_caches_clicked(self) -> None: + if not self.reader.channel.isMonitorActive(): + if not self.file_writer_thread.isRunning(): + self.file_writer_thread.start() + self.file_writer.save_caches_to_h5(clear_caches=True) + else: + QMessageBox.critical(None, + 'Error', + 'Stop Live View to Save Cache', + QMessageBox.Ok) + + def stats_button_clicked(self) -> None: + """ + Creates a popup dialog for viewing the stats of a specific button. + + This method identifies the button pressed and opens the corresponding stats dialog. + """ + if self.reader is not None: + sending_button = self.sender() + text = sending_button.text() + self.stats_dialogs[text] = RoiStatsDialog(parent=self, + stats_text=text, + timer=self.timer_labels) + self.stats_dialogs[text].show() + def start_stats_monitors(self) -> None: """ Initializes monitors for updating stats values. @@ -308,8 +394,8 @@ def start_stats_monitors(self) -> None: pv = f"{self.reader.stats[stat_num][stat]}" self.stats_data[pv] = caget(pv) camonitor(pvname=pv, callback=self.stats_ca_callback) - except: - print("Failed to Connect to Stats CA Monitors") + except Exception as e: + print(f"[Diffraction Image Viewer] Failed to Connect to Stats CA Monitors: {e}") def stats_ca_callback(self, pvname, value, **kwargs) -> None: """ @@ -322,20 +408,11 @@ def stats_ca_callback(self, pvname, value, **kwargs) -> None: """ self.stats_data[pvname] = value - def stats_button_clicked(self) -> None: - """ - Creates a popup dialog for viewing the stats of a specific button. + def on_writer_finished(self, message) -> None: + print(message) + self.file_writer_thread.quit() + self.file_writer_thread.wait() - This method identifies the button pressed and opens the corresponding stats dialog. - """ - if self.reader is not None: - sending_button = self.sender() - text = sending_button.text() - self.stats_dialog[text] = RoiStatsDialog(parent=self, - stats_text=text, - timer=self.timer_labels) - self.stats_dialog[text].show() - def show_rois_checked(self) -> None: """ Toggles visibility of ROIs based on the checked state of the display checkbox. 
@@ -408,39 +485,7 @@ def add_rois(self) -> None: self.image_view.addItem(roi) roi.sigRegionChanged.connect(self.update_roi_region) except Exception as e: - print(f'Failed to add ROIs:{e}') - - def start_hkl_viewer(self) -> None: - try: - if self.reader is not None and 'HKL' in self.reader.config: - qx = self.qx.flatten() - qy = self.qy.flatten() - qz = self.qz.flatten() - intensity = self.reader.image.flatten() - - np.save('qx.npy', qx) - np.save('qy.npy', qy) - np.save('qz.npy', qz) - np.save('intensity.npy', intensity) - - # cmd = ['python', 'viewer/hkl_3d_viewer.py', - # '--qx-file', 'qx.npy', - # '--qy-file', 'qy.npy', - # '--qz-file', 'qz.npy', - # '--intensity-file', 'intensity.npy'] - - # process = subprocess.Popen( - # cmd, - # stdout=subprocess.PIPE, - # stderr=subprocess.STDOUT, - # preexec_fn=os.setsid, - # universal_newlines=True - # ) - - # self.processes[process.pid] = process - - except Exception as e: - print(f'Failed to load HKL Viewer:{e}') + print(f'[Diffraction Image Viewer] Failed to add ROIs:{e}') def start_hkl_monitors(self) -> None: @@ -448,16 +493,19 @@ def start_hkl_monitors(self) -> None: Initializes camonitors for HKL values and stores them in a dictionary. """ try: - if "HKL" in self.reader.config: + if self.reader.HKL_IN_CONFIG: self.hkl_config = self.reader.config["HKL"] - - # Monitor each HKL parameter - for section, pv_dict in self.hkl_config.items(): - for key, pv_name in pv_dict.items(): - self.hkl_data[pv_name] = caget(pv_name) - camonitor(pvname=pv_name, callback=self.hkl_ca_callback) + if not self.hkl_pvs: + for section, pv_dict in self.hkl_config.items(): + for section_key, pv_name in pv_dict.items(): + if pv_name not in self.hkl_pvs: + self.hkl_pvs[pv_name] = PV(pvname=pv_name) + for pv_name, pv_obj in self.hkl_pvs.items(): + self.hkl_data[pv_name] = pv_obj.get() # Get current value + pv_obj.add_callback(callback=self.hkl_ca_callback) + self.hkl_data_updated.emit(True) except Exception as e: - print(f"Failed to initialize HKL monitors: {e}") + print(f"[Diffraction Image Viewer] Failed to initialize HKL monitors: {e}") def hkl_ca_callback(self, pvname, value, **kwargs) -> None: """ @@ -470,64 +518,108 @@ def hkl_ca_callback(self, pvname, value, **kwargs) -> None: """ self.hkl_data[pvname] = value if self.qx is not None and self.qy is not None and self.qz is not None: - self.update_rsm() + self.hkl_data_updated.emit(True) - def init_hkl(self) -> None: - """ - Initializes HKL parameters by setting up monitors for each HKL value. 
- """ - self.start_hkl_monitors() - self.hkl_setup() - + def handle_hkl_data_update(self): + if self.reader is not None and not self.stop_hkl.isChecked() and self.hkl_data: + self.hkl_setup() + if self.q_conv is None: + raise ValueError("QConversion object is not initialized.") + self.update_rsm() + + def hkl_setup(self) -> None: - if self.hkl_config is not None: + if (self.hkl_config is not None) and (not self.stop_hkl.isChecked()): try: # Get everything for the sample circles sample_circle_keys = [pv_name for section, pv_dict in self.hkl_config.items() if section.startswith('SAMPLE_CIRCLE') for pv_name in pv_dict.values()] self.sample_circle_directions = [] - self.sample_cirlce_names = [] + self.sample_circle_names = [] self.sample_circle_positions = [] for pv_key in sample_circle_keys: if pv_key.endswith('DirectionAxis'): - self.sample_circle_directions.append(self.hkl_data[pv_key]) + direction = self.hkl_data.get(pv_key) + if direction is None: + raise ValueError(f"Missing sample circle direction PV data: {pv_key}") + self.sample_circle_directions.append(direction) elif pv_key.endswith('SpecMotorName'): - self.sample_cirlce_names.append(self.hkl_data[pv_key]) + name = self.hkl_data.get(pv_key) + if name is None: + raise ValueError(f"Missing sample circle motor name PV data: {pv_key}") + self.sample_circle_names.append(name) elif pv_key.endswith('Position'): - self.sample_circle_positions.append(self.hkl_data[pv_key]) + position = self.hkl_data.get(pv_key) + if position is None: + raise ValueError(f"Missing sample circle position PV data: {pv_key}") + self.sample_circle_positions.append(position) + # Get everything for the detector circles det_circle_keys = [pv_name for section, pv_dict in self.hkl_config.items() if section.startswith('DETECTOR_CIRCLE') for pv_name in pv_dict.values()] self.det_circle_directions = [] - self.det_cirlce_names = [] + self.det_circle_names = [] self.det_circle_positions = [] for pv_key in det_circle_keys: if pv_key.endswith('DirectionAxis'): - self.det_circle_directions.append(self.hkl_data[pv_key]) + direction = self.hkl_data.get(pv_key) + if direction is None: + raise ValueError(f"Missing detector circle direction PV data: {pv_key}") + self.det_circle_directions.append(direction) elif pv_key.endswith('SpecMotorName'): - self.det_cirlce_names.append(self.hkl_data[pv_key]) + name = self.hkl_data.get(pv_key) + if name is None: + raise ValueError(f"Missing detector circle motor name PV data: {pv_key}") + self.det_circle_names.append(name) elif pv_key.endswith('Position'): - self.det_circle_positions.append(self.hkl_data[pv_key]) + position = self.hkl_data.get(pv_key) + if position is None: + raise ValueError(f"Missing detector circle position PV data: {pv_key}") + self.det_circle_positions.append(position) + # Primary Beam Direction - self.primary_beam_directions = [self.hkl_data[axis_number] for axis_number in self.hkl_config['PRIMARY_BEAM_DIRECTION'].values()] + primary_beam_pvs = self.hkl_config.get('PRIMARY_BEAM_DIRECTION', {}).values() + self.primary_beam_directions = [self.hkl_data.get(axis_number) for axis_number in primary_beam_pvs] + if any(val is None for val in self.primary_beam_directions): + raise ValueError("Missing primary beam direction PV data") + # Inplane Reference Direction - self.inplane_reference_directions = [self.hkl_data[axis_number] for axis_number in self.hkl_config['INPLANE_REFERENCE_DIRECITON'].values()] + inplane_ref_pvs = self.hkl_config.get('INPLANE_REFERENCE_DIRECITON', {}).values() + self.inplane_reference_directions = 
[self.hkl_data.get(axis_number) for axis_number in inplane_ref_pvs] + if any(val is None for val in self.inplane_reference_directions): + raise ValueError("Missing inplane reference direction PV data") + # Sample Surface Normal Direction - self.sample_surface_normal_directions = [self.hkl_data[axis_number] for axis_number in self.hkl_config['SAMPLE_SURFACE_NORMAL_DIRECITON'].values()] + surface_normal_pvs = self.hkl_config.get('SAMPLE_SURFACE_NORMAL_DIRECITON', {}).values() + self.sample_surface_normal_directions = [self.hkl_data.get(axis_number) for axis_number in surface_normal_pvs] + if any(val is None for val in self.sample_surface_normal_directions): + raise ValueError("Missing sample surface normal direction PV data") + # UB Matrix - self.ub_matrix = self.hkl_data[self.hkl_config['SPEC']['UB_MATRIX_VALUE']] - self.ub_matrix = np.reshape(self.ub_matrix,(3,3)) + ub_matrix_pv = self.hkl_config['SPEC']['UB_MATRIX_VALUE'] + self.ub_matrix = self.hkl_data.get(ub_matrix_pv) + if self.ub_matrix is None or not isinstance(self.ub_matrix, np.ndarray) or self.ub_matrix.size != 9: + raise ValueError("Invalid UB Matrix data") + self.ub_matrix = np.reshape(self.ub_matrix,(3,3)) + # Energy - self.energy = self.hkl_data[self.hkl_config['SPEC']['ENERGY_VALUE']] * 1000 + energy_pv = self.hkl_config['SPEC']['ENERGY_VALUE'] + self.energy = self.hkl_data.get(energy_pv) + if self.energy is None: + raise ValueError("Missing energy PV data") + self.energy *= 1000 + # Make sure all values are setup correctly before instantiating QConversion - if self.sample_circle_directions and self.det_circle_directions and self.primary_beam_directions: - # Class for the conversion of angular coordinates to momentum space + if all([self.sample_circle_directions, self.det_circle_directions, self.primary_beam_directions]): self.q_conv = xu.experiment.QConversion(self.sample_circle_directions, self.det_circle_directions, self.primary_beam_directions) - except Exception as e: - print(f'Error Setting up HKL: {e}') - return - + else: + self.q_conv = None + raise ValueError("QConversion initialization failed due to missing PV data.") + except Exception as e: + print(f'[Diffraction Image Viewer] Error Setting up HKL: {e}') + self.q_conv = None # Reset to None on failure to prevent invalid calculations + def create_rsm(self) -> np.ndarray: """ Creates a reciprocal space map (RSM) from the current detector image. @@ -547,43 +639,48 @@ def create_rsm(self) -> np.ndarray: The conversion uses the current sample and detector angles along with the UB matrix to transform from angular to reciprocal space coordinates. 
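Returns None when no reader is connected, when HKL updates are paused via the stop checkbox, or when the conversion raises an error.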
""" - if self.hkl_data: + if self.reader is not None and self.hkl_data and (not self.stop_hkl.isChecked()): try: hxrd = xu.HXRD(self.inplane_reference_directions, self.sample_surface_normal_directions, en=self.energy, qconv=self.q_conv) - if self.stats_data: - if f"{self.reader.pva_prefix}:Stats4:Total_RBV" in self.stats_data: - roi = [0, self.reader.shape[0], 0, self.reader.shape[1]] - cch1 = self.hkl_data['DetectorSetup:CenterChannelPixel'][0] # Center Channel Pixel 1 - cch2 = self.hkl_data['DetectorSetup:CenterChannelPixel'][1] # Center Channel Pixel 2 - distance = self.hkl_data['DetectorSetup:Distance'] # Distance - pixel_dir1 = self.hkl_data['DetectorSetup:PixelDirection1'] # Pixel Direction 1 - pixel_dir2 = self.hkl_data['DetectorSetup:PixelDirection2'] # PIxel Direction 2 - nch1 = self.reader.shape[0] # Number of detector pixels along direction 1 - nch2 = self.reader.shape[1] # Number of detector pixels along direction 2 - pixel_width1 = self.hkl_data['DetectorSetup:Size'][0] / nch1 # width of a pixel along direction 1 - pixel_width2 = self.hkl_data['DetectorSetup:Size'][1] / nch2 # width of a pixel along direction 2 - - hxrd.Ang2Q.init_area(pixel_dir1, pixel_dir2, cch1=cch1, cch2=cch2, - Nch1=nch1, Nch2=nch2, pwidth1=pixel_width1, - pwidth2=pixel_width2, distance=distance, roi=roi) - - angles = [*self.sample_circle_positions, *self.det_circle_positions] - - return hxrd.Ang2Q.area(*angles, UB=self.ub_matrix) + roi = [0, self.reader.shape[0], 0, self.reader.shape[1]] + cch1 = self.hkl_data['DetectorSetup:CenterChannelPixel'][0] # Center Channel Pixel 1 + cch2 = self.hkl_data['DetectorSetup:CenterChannelPixel'][1] # Center Channel Pixel 2 + distance = self.hkl_data['DetectorSetup:Distance'] # Distance + pixel_dir1 = self.hkl_data['DetectorSetup:PixelDirection1'] # Pixel Direction 1 + pixel_dir2 = self.hkl_data['DetectorSetup:PixelDirection2'] # PIxel Direction 2 + nch1 = self.reader.shape[0] # Number of detector pixels along direction 1 + nch2 = self.reader.shape[1] # Number of detector pixels along direction 2 + pixel_width1 = self.hkl_data['DetectorSetup:Size'][0] / nch1 # width of a pixel along direction 1 + pixel_width2 = self.hkl_data['DetectorSetup:Size'][1] / nch2 # width of a pixel along direction 2 + + hxrd.Ang2Q.init_area(pixel_dir1, pixel_dir2, cch1=cch1, cch2=cch2, + Nch1=nch1, Nch2=nch2, pwidth1=pixel_width1, + pwidth2=pixel_width2, distance=distance, roi=roi) + + angles = [*self.sample_circle_positions, *self.det_circle_positions] + + return hxrd.Ang2Q.area(*angles, UB=self.ub_matrix) except Exception as e: - print(f'Error Creating RSM: {e}') + print(f'[Diffration Image Viewer] Error Creating RSM: {e}') return else: return + + def reset_rsm_vars(self) -> None: + self.hkl_data = {} + self.rois.clear() + self.qx = None + self.qy = None + self.qz = None + def update_rois(self) -> None: """ Updates the positions and sizes of ROIs based on changes from the EPICS software. - Loops through the cached ROIs and adjusts their parameters accordingly. 
""" for roi, roi_dict in zip(self.rois, self.reader.rois.values()): @@ -593,6 +690,7 @@ def update_rois(self) -> None: height = roi_dict.get("SizeY",0) if not(self.image_is_transposed) else roi_dict.get('SizeX',0) roi.setPos(pos=x_pos, y=y_pos) roi.setSize(size=(width, height)) + self.image_view.update() def update_roi_region(self) -> None: """ @@ -617,16 +715,20 @@ def update_mouse_pos(self, pos) -> None: if self.reader is not None: img = self.image_view.getImageItem() q_pointer = img.mapFromScene(pos) - x, y = q_pointer.x(), q_pointer.y() + self.mouse_x, self.mouse_y = int(q_pointer.x()), int(q_pointer.y()) + self.update_mouse_labels() + + def update_mouse_labels(self) -> None: + if self.reader is not None: if self.image is not None: - if 0 <= x < self.image.shape[0] and 0 <= y < self.image.shape[1]: - self.mouse_x_val.setText(f"{x:.3f}") - self.mouse_y_val.setText(f"{y:.3f}") - self.mouse_px_val.setText(f'{self.image[int(x)][int(y)]}') - if self.hkl_data: - self.mouse_h.setText(f'{self.qx[int(x)][int(y)]}') - self.mouse_k.setText(f'{self.qy[int(x)][int(y)]}') - self.mouse_l.setText(f'{self.qz[int(x)][int(y)]}') + if 0 <= self.mouse_x < self.image.shape[0] and 0 <= self.mouse_y < self.image.shape[1]: + self.mouse_x_val.setText(f"{self.mouse_x}") + self.mouse_y_val.setText(f"{self.mouse_y}") + self.mouse_px_val.setText(f'{self.image[self.mouse_x][self.mouse_y]}') + if self.qx is not None and len(self.qx) > 0: + self.mouse_h.setText(f'{self.qx[self.mouse_x][self.mouse_y]:.7f}') + self.mouse_k.setText(f'{self.qy[self.mouse_x][self.mouse_y]:.7f}') + self.mouse_l.setText(f'{self.qz[self.mouse_x][self.mouse_y]:.7f}') def update_labels(self) -> None: """ @@ -640,6 +742,7 @@ def update_labels(self) -> None: self.missed_frames_val.setText(f'{self.reader.frames_missed:d}') self.frames_received_val.setText(f'{self.reader.frames_received:d}') self.plot_call_id.setText(f'{self.call_id_plot:d}') + self.update_mouse_labels() if len(self.reader.shape): self.size_x_val.setText(f'{self.reader.shape[0]:d}') self.size_y_val.setText(f'{self.reader.shape[1]:d}') @@ -651,12 +754,12 @@ def update_labels(self) -> None: self.stats5_total_value.setText(f"{self.stats_data.get(f'{self.reader.pva_prefix}:Stats5:Total_RBV', '0.0')}") def update_rsm(self) -> None: - if self.reader is not None: + if (self.reader is not None) and (not self.stop_hkl.isChecked()): if self.hkl_data: - self.hkl_setup() - self.qx = self.create_rsm()[0].T if self.image_is_transposed else self.create_rsm()[0] - self.qy = self.create_rsm()[1].T if self.image_is_transposed else self.create_rsm()[1] - self.qz = self.create_rsm()[2].T if self.image_is_transposed else self.create_rsm()[2] + qxyz = self.create_rsm() + self.qx = qxyz[0].T if self.image_is_transposed else qxyz[0] + self.qy = qxyz[1].T if self.image_is_transposed else qxyz[1] + self.qz = qxyz[2].T if self.image_is_transposed else qxyz[2] def update_image(self) -> None: """ @@ -667,21 +770,30 @@ def update_image(self) -> None: """ if self.reader is not None: self.call_id_plot +=1 - image = self.reader.image - if image is not None: - self.image = np.rot90(image, k=self.rot_num).T if self.image_is_transposed else np.rot90(image, k=self.rot_num) + if self.reader.CACHING_MODE in ['', 'alignment', 'scan']: + self.image = self.reader.image + elif self.reader.CACHING_MODE == 'bin': + index = self.slider.value() + self.image = np.reshape(np.mean(np.stack(self.reader.cached_images[index]), axis=0), self.reader.shape) # (self.reader.shape) + + if self.image is not None: + self.image = 
np.transpose(self.image) if self.image_is_transposed else self.image + self.image = np.rot90(m=self.image, k=self.rot_num) if len(self.image.shape) == 2: min_level, max_level = np.min(self.image), np.max(self.image) if self.log_image.isChecked(): - self.image = np.log1p(self.image + 1) - min_level = np.log1p(min_level + 1) - max_level = np.log1p(max_level + 1) + # Ensure non-negative values + self.image = np.maximum(self.image, 0) + epsilon = 1e-10 + self.image = np.log10(self.image + 1) + min_level = np.log10(max(min_level, epsilon) + 1) + max_level = np.log10(max_level + 1) if self.first_plot: self.image_view.setImage(self.image, - autoRange=False, - autoLevels=False, - levels=(min_level, max_level), - autoHistogramRange=False) + autoRange=False, + autoLevels=False, + levels=(min_level, max_level), + autoHistogramRange=False) # Auto sets the max value based on first incoming image self.max_setting_val.setValue(max_level) self.min_setting_val.setValue(min_level) @@ -693,8 +805,8 @@ def update_image(self) -> None: autoHistogramRange=False) # Separate image update for horizontal average plot self.horizontal_avg_plot.plot(x=np.mean(self.image, axis=0), - y=np.arange(0,self.image.shape[1]), - clear=True) + y=np.arange(self.image.shape[1]), + clear=True) self.min_px_val.setText(f"{min_level:.2f}") self.max_px_val.setText(f"{max_level:.2f}") @@ -714,13 +826,20 @@ def closeEvent(self, event): Args: event (QCloseEvent): The close event triggered when the main window is closed. """ - del self.stats_dialog # otherwise dialogs stay in memory - super(ImageWindow,self).closeEvent(event) + del self.stats_dialogs # otherwise dialogs stay in memory + if self.file_writer_thread.isRunning(): + self.file_writer_thread.quit() + self.file_writer_thread.wait() + super(DiffractionImageWindow,self).closeEvent(event) - -if __name__ == '__main__': +def main(): app = QApplication(sys.argv) + # size_manager = SizeManager(app=app) window = ConfigDialog() window.show() - sys.exit(app.exec_()) \ No newline at end of file + sys.exit(app.exec_()) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/viewer/base_window.py b/viewer/base_window.py new file mode 100644 index 0000000..8e2891d --- /dev/null +++ b/viewer/base_window.py @@ -0,0 +1,409 @@ +#!/usr/bin/env python3 +""" +Base Window Class +A base class for all main window interfaces in the DashPVA application. +Provides common functionality and consistent behavior across all windows. +""" + +import sys +import os +from pathlib import Path +from PyQt5.QtWidgets import QMainWindow, QFileDialog, QMessageBox, QAction, QMenuBar, QLabel +from PyQt5.QtCore import pyqtSignal, Qt, QTimer +from PyQt5 import uic +from database import DatabaseInterface +from viewer.documentation.dialog import DocumentationDialog +import time, subprocess, shutil + +di = DatabaseInterface() + +# Add the project root to the Python path +project_root = Path(__file__).parent.parent + +class BaseWindow(QMainWindow): + """ + Base class for all main windows in the DashPVA application. + Provides common functionality like file operations, UI loading, and standard menu actions. + """ + + # Signals + file_opened = pyqtSignal(str) # Emitted when a file is opened + file_saved = pyqtSignal(str) # Emitted when a file is saved + + def __init__(self, ui_file_name=None, viewer_name=None): + """ + Initialize the base window.
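+        Loads the given UI file (when provided), wires the standard menu actions, and starts the CPU/GPU/runtime status bar.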
+ + Args: + ui_file_name (str): Name of the UI file to load (without path) + viewer_name (str, optional): Human-friendly name of the viewer for status messages + """ + super().__init__() + self.ui_file_name = ui_file_name + self.current_file_path = None + self.viewer_name = viewer_name + + if ui_file_name: + self.load_ui() + + self.setup_base_connections() + + # CPU, GPU, runtime in status bar + self.init_perf_statusbar() + + def load_ui(self): + """Load the UI file for this window.""" + if not self.ui_file_name: + return + + ui_file = project_root / "gui" / self.ui_file_name + if ui_file.exists(): + uic.loadUi(str(ui_file), self) + else: + QMessageBox.critical(self, "Error", f"UI file not found: {ui_file}") + sys.exit(1) + + def setup_base_connections(self): + """Set up connections for standard menu actions.""" + # Only connect if the actions exist (they should from base_mainwindow.ui) + if hasattr(self, 'actionOpen'): + self.actionOpen.triggered.connect(self.open_file) + if hasattr(self, 'actionOpenFolder'): + self.actionOpenFolder.triggered.connect(self.open_folder) + if hasattr(self, 'actionSave'): + self.actionSave.triggered.connect(self.save_file) + if hasattr(self, 'actionExit'): + self.actionExit.triggered.connect(self.close) + # Documentation menu (wired here; content discovery handled by DocumentationDialog) + if hasattr(self, 'actionOpenDocumentation'): + self.actionOpenDocumentation.triggered.connect(self.open_documentation) + + # Ensure a 'Windows' menu exists globally for docks to use + try: + self.ensure_windows_menu() + except Exception: + pass + + def ensure_windows_menu(self): + """Create the 'Windows' menu on the menu bar if it doesn't already exist, and return it.""" + # Acquire a QMenuBar without calling self.menuBar() directly to avoid shadowing issues + mbar = None + try: + mb_attr = getattr(self, 'menuBar', None) + # If UI provided a QMenuBar attribute, use it + if isinstance(mb_attr, QMenuBar): + mbar = mb_attr + else: + # Fallback to class-qualified method to avoid any overrides or shadowing + mbar = QMainWindow.menuBar(self) + except Exception: + mbar = None + + # Strict type check before proceeding + if not isinstance(mbar, QMenuBar): + return None + + # Try to find an existing 'Windows' menu + windows_menu = None + try: + for act in mbar.actions(): + m = act.menu() + if m is not None and m.title() == "Windows": + windows_menu = m + break + except Exception: + windows_menu = None + + # Create if missing, then store and return + if windows_menu is None: + try: + from PyQt5.QtWidgets import QMenu + windows_menu = QMenu("Windows", self) + mbar.addMenu(windows_menu) + except Exception: + windows_menu = None + + self.windows_menu = windows_menu + return windows_menu + + def get_windows_menu(self): + """Return the Windows menu, creating it if necessary.""" + return self.ensure_windows_menu() + + def ensure_windows_submenu(self, segment_key=None): + """Ensure and return a submenu under 'Windows' titled by normalized segment_key. + Normalization: (segment_key or "").strip().lower(); empty -> 'other'. + Keeps the 'other' submenu as the bottom-most entry within the Windows menu. 
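+        Example: ensure_windows_submenu(' Analysis ') returns the submenu titled 'analysis'.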
+ """ + windows_menu = self.ensure_windows_menu() + if windows_menu is None: + return None + key = (segment_key or "").strip().lower() + if not key: + key = "other" + if not hasattr(self, '_windows_submenus'): + self._windows_submenus = {} + submenu = self._windows_submenus.get(key) + if submenu is None: + from PyQt5.QtWidgets import QMenu + submenu = QMenu(key, self) + windows_menu.addMenu(submenu) + self._windows_submenus[key] = submenu + # Keep 'other' at bottom by moving its action to end if it exists + other = self._windows_submenus.get('other') + if other is not None: + for act in list(windows_menu.actions()): + if act.menu() is other: + windows_menu.removeAction(act) + windows_menu.addMenu(other) + break + return submenu + + + def add_dock_toggle_action(self, dock, title: str, segment_name=None): + """Add a checkable action for the given dock under a segmented Windows submenu and wire visibility sync. + Segment normalization: (segment_name or "").strip().lower(); empty -> "other". + """ + windows_menu = self.ensure_windows_menu() + if windows_menu is None: + return None + submenu = self.ensure_windows_submenu(segment_name) + action = QAction(str(title), self) + action.setCheckable(True) + action.setChecked(dock.isVisible()) + action.toggled.connect(lambda checked: dock.setVisible(bool(checked))) + dock.visibilityChanged.connect(lambda visible: action.setChecked(bool(visible))) + submenu.addAction(action) + return action + + def add_windows_menu_action(self, action, segment_name=None): + """Add an arbitrary action to the segmented Windows menu under the specified submenu.""" + submenu = self.ensure_windows_submenu(segment_name) + if submenu is not None and action is not None: + submenu.addAction(action) + return action + + def open_file(self): + """ + Handle File -> Open action. + Shows file dialog and calls load_file_content if a file is selected. + """ + file_path, _ = QFileDialog.getOpenFileName( + self, + "Open File", + "", + self.get_file_filters() + ) + if file_path: + self.current_file_path = file_path + self.load_file_content(file_path) + self.file_opened.emit(file_path) + + def save_file(self): + """ + Handle File -> Save action. + Shows file dialog and calls save_file_content if a path is selected. + """ + file_path, _ = QFileDialog.getSaveFileName( + self, + "Save File", + "", + self.get_file_filters() + ) + if file_path: + self.current_file_path = file_path + self.save_file_content(file_path) + self.file_saved.emit(file_path) + + def get_file_filters(self): + """ + Get file filters for open/save dialogs. + Override in subclasses to provide specific file types. + + Returns: + str: File filter string for QFileDialog + """ + return "All Files (*)" + + def load_file_content(self, file_path): + """ + Load content from the specified file. + Override in subclasses to implement specific loading logic. + + Args: + file_path (str): Path to the file to load + """ + # Base implementation - override in subclasses + self.update_status(f"Loaded: {os.path.basename(file_path)}") + + def open_folder(self): + """ + Handle File -> Open Folder action. + Shows folder dialog and calls load_folder_content if a folder is selected. + Override in subclasses to implement specific folder loading logic. + """ + folder_path = QFileDialog.getExistingDirectory( + self, + "Select Folder", + "" + ) + if folder_path: + self.load_folder_content(folder_path) + + def load_folder_content(self, folder_path): + """ + Load content from the specified folder. 
+ Override in subclasses to implement specific folder loading logic. + + Args: + folder_path (str): Path to the folder to load + """ + # Base implementation - override in subclasses + self.update_status(f"Opened folder: {os.path.basename(folder_path)}") + + def save_file_content(self, file_path): + """ + Save content to the specified file. + Override in subclasses to implement specific saving logic. + + Args: + file_path (str): Path to save the file to + """ + # Base implementation - override in subclasses + self.update_status(f"Saved: {os.path.basename(file_path)}") + + def open_documentation(self): + """Open the documentation dialog without disabling the main window (modeless).""" + try: + # Keep a reference so it isn't garbage collected + self._documentation_dialog = DocumentationDialog(self) + dlg = self._documentation_dialog + # Delete dialog on close to avoid leaks + try: + dlg.setAttribute(Qt.WA_DeleteOnClose, True) + except Exception: + pass + # Auto-discovery is handled by the dialog; pass this viewer instance + dlg.open_for_viewer(self) + dlg.setWindowTitle("Documentation") + dlg.resize(900, 700) + # Show modeless, do not block/disable main window + dlg.show() + except Exception as e: + self.update_status(f"Failed to open documentation: {e}", level='error') + + def update_status(self, message, level: str = 'info', source: str = None): + """ + Update the status label with a message and include the viewer name. + Also logs error-like messages to error_output.txt with viewer context. + + Args: + message (str): Status message to display + level (str): Optional level ('info', 'warning', 'error') + source (str): Optional override for viewer/source name; defaults to self.viewer_name + """ + # Compose prefixed message + src = source or getattr(self, 'viewer_name', None) or self.__class__.__name__ + prefix = f"[{src}] " + full_msg = f"{prefix}{message}" if isinstance(message, str) else message + + # Update status label if it exists + if hasattr(self, 'label_status'): + try: + self.label_status.setText(full_msg) + except Exception: + # Fallback to plain message if label does not accept complex types + self.label_status.setText(str(message)) + + # Log to error_output.txt if message indicates an error or failure + try: + import datetime + error_file = project_root / "error_output.txt" + should_log = False + if isinstance(message, str): + msg_lower = message.lower() + should_log = (level in ('error', 'warning')) or ('error' in msg_lower) or ('failed' in msg_lower) + if should_log: + with open(error_file, "a") as f: + f.write(f"[{datetime.datetime.now().isoformat()}] {prefix}{message}\n") + except Exception: + # Avoid raising during logging + pass + + def init_perf_statusbar(self): + sb = QMainWindow.statusBar(self) + self._cpu_label = QLabel("CPU: -%") + self._gpu_label = QLabel("GPU: N/A") + self._runtime_label = QLabel("Runtime: 0s") + sb.addPermanentWidget(self._cpu_label) + sb.addPermanentWidget(self._gpu_label) + sb.addPermanentWidget(self._runtime_label) + self._start_time = time.monotonic() + self._cpu_prev = None + self._perf_timer = QTimer(self) + self._perf_timer.setInterval(1000) + self._perf_timer.timeout.connect(self._update_perf_labels) + self._perf_timer.start() + + def _update_perf_labels(self): + # CPU + with open("/proc/stat", "r") as f: + parts = f.readline().split() + vals = list(map(int, parts[1:])) + idle = vals[3] + (vals[4] if len(vals) > 4 else 0) + total = sum(vals[:8]) if len(vals) >= 8 else sum(vals) + if self._cpu_prev is not None: + ptotal, pidle = self._cpu_prev 
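+            # Jiffy deltas since the previous 1 s sample; busy percent = (dt - didle) / dt * 100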
+ dt = total - ptotal + didle = idle - pidle + if dt > 0: + percent = (dt - didle) * 100.0 / dt + self._cpu_label.setText(f"CPU: {percent:.0f}%") + self._cpu_prev = (total, idle) + + # GPU + gpu_text = "GPU: N/A" + smi = shutil.which("nvidia-smi") + if smi is not None: + res = subprocess.run([smi, "--query-gpu=utilization.gpu", "--format=csv,noheader,nounits"], capture_output=True, text=True) + lines = res.stdout.strip().splitlines() + if lines: + val = lines[0].strip() + if val.isdigit(): + gpu_text = f"GPU: {int(val)}%" + self._gpu_label.setText(gpu_text) + + # Runtime + elapsed = int(time.monotonic() - self._start_time) + self._runtime_label.setText(f"Runtime: {elapsed}s") + + def setup_window_properties(self, title, width=800, height=600): + """ + Set up basic window properties. + + Args: + title (str): Window title + width (int): Window width + height (int): Window height + """ + self.setWindowTitle(title) + # If no explicit viewer_name was set, derive a friendly name from the title + if not getattr(self, 'viewer_name', None) and isinstance(title, str): + # Use the segment before the first ' - ' as the viewer name + self.viewer_name = title.split(' - ')[0].strip() or self.__class__.__name__ + self.resize(width, height) + + def closeEvent(self, event): + """ + Handle window close event. + Override in subclasses to add custom close behavior. + """ + # Base implementation - just accept the close event + event.accept() + + def set_viewer_name(self, name: str) -> None: + """Set or update the viewer name used in status messages.""" + try: + self.viewer_name = str(name) if name else None + except Exception: + self.viewer_name = None diff --git a/viewer/controls/__init__.py b/viewer/controls/__init__.py new file mode 100644 index 0000000..c403da1 --- /dev/null +++ b/viewer/controls/__init__.py @@ -0,0 +1,2 @@ +# Dimension-specific controls package for DashPVA viewers +# Exposes Controls1D, Controls2D, Controls3D classes diff --git a/viewer/controls/controls_1d.py b/viewer/controls/controls_1d.py new file mode 100644 index 0000000..f7d633d --- /dev/null +++ b/viewer/controls/controls_1d.py @@ -0,0 +1,29 @@ +""" +1D Controls wiring for Workbench and other viewers. +Encapsulates signal connections for 1D-specific UI elements. +Currently minimal; placeholder for future 1D-specific controls. +""" + +from typing import Optional + + +class Controls1D: + def __init__(self, main_window): + self.main = main_window + + def setup(self) -> None: + """Wire up 1D controls to main window handlers. + This is intentionally light since the app currently has no dedicated 1D controls. + """ + try: + # Placeholder for future 1D controls (levels, scale, smoothing, etc.) + # Example wiring (when widgets exist in the UI): + # if hasattr(self.main, 'cb1DAutoScale'): + # self.main.cb1DAutoScale.toggled.connect(self.main.on_1d_auto_scale_toggled) + pass + except Exception as e: + # Reuse Workbench status reporter + try: + self.main.update_status(f"Error setting up 1D controls: {e}") + except Exception: + pass diff --git a/viewer/controls/controls_2d.py b/viewer/controls/controls_2d.py new file mode 100644 index 0000000..9e4f274 --- /dev/null +++ b/viewer/controls/controls_2d.py @@ -0,0 +1,118 @@ +""" +2D Controls wiring for Workbench and other viewers. +Encapsulates signal connections for 2D-specific UI elements. 
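+
+A minimal usage sketch (assumes the main window exposes the optional
+widgets checked for in setup()):
+
+    controls = Controls2D(main_window)
+    controls.setup()  # wires colormap, levels, frame navigation, ROI,
+                      # speckle, and playback controls where present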
+""" + +from typing import Optional +from PyQt5.QtWidgets import QLabel, QHBoxLayout + + +class Controls2D: + def __init__(self, main_window): + self.main = main_window + self._roi_stats_label_added = False + + def setup(self) -> None: + """Wire up 2D controls to main window handlers.""" + try: + # Colormap selection + if hasattr(self.main, 'cbColorMapSelect_2d'): + self.main.cbColorMapSelect_2d.currentTextChanged.connect(self.main.on_colormap_changed) + + # Auto levels checkbox + if hasattr(self.main, 'cbAutoLevels'): + self.main.cbAutoLevels.toggled.connect(self.main.on_auto_levels_toggled) + + # Frame navigation controls + if hasattr(self.main, 'btn_prev_frame'): + self.main.btn_prev_frame.clicked.connect(self.main.previous_frame) + if hasattr(self.main, 'btn_next_frame'): + self.main.btn_next_frame.clicked.connect(self.main.next_frame) + if hasattr(self.main, 'frame_spinbox'): + self.main.frame_spinbox.valueChanged.connect(self.main.on_frame_spinbox_changed) + + # Speckle analysis & intensity controls + if hasattr(self.main, 'cbLogScale'): + self.main.cbLogScale.toggled.connect(self.main.on_log_scale_toggled) + if hasattr(self.main, 'sbVmin'): + self.main.sbVmin.valueChanged.connect(self.main.on_vmin_changed) + if hasattr(self.main, 'sbVmax'): + self.main.sbVmax.valueChanged.connect(self.main.on_vmax_changed) + + # ROI drawing + if hasattr(self.main, 'btnDrawROI'): + self.main.btnDrawROI.clicked.connect(self.main.on_draw_roi_clicked) + + # Reference/Other frame selection for speckle compare + if hasattr(self.main, 'sbRefFrame'): + self.main.sbRefFrame.valueChanged.connect(self.main.on_ref_frame_changed) + if hasattr(self.main, 'sbOtherFrame'): + self.main.sbOtherFrame.valueChanged.connect(self.main.on_other_frame_changed) + + # Analyze speckle + if hasattr(self.main, 'btnAnalyzeSpeckle'): + self.main.btnAnalyzeSpeckle.clicked.connect(self.main.on_analyze_speckle_clicked) + + # Plot motor positions + if hasattr(self.main, 'btnPlotMotorPositions'): + self.main.btnPlotMotorPositions.clicked.connect(self.main.on_plot_motor_positions_clicked) + + # Playback controls wiring (ensure timer exists and connect buttons) + try: + from PyQt5.QtCore import QTimer + if not hasattr(self.main, 'play_timer') or self.main.play_timer is None: + self.main.play_timer = QTimer(self.main) + try: + self.main.play_timer.timeout.connect(self.main._advance_frame_playback) + print("[PLAYBACK][Controls2D] Created play_timer and wired timeout") + except Exception as e: + print(f"[PLAYBACK][Controls2D] ERROR wiring timer: {e}") + + if hasattr(self.main, 'btn_play'): + try: + self.main.btn_play.clicked.connect(self.main.start_playback) + print("[PLAYBACK][Controls2D] Wired btn_play -> start_playback") + except Exception as e: + print(f"[PLAYBACK][Controls2D] ERROR wiring btn_play: {e}") + else: + print("[PLAYBACK][Controls2D] btn_play not found on main") + + if hasattr(self.main, 'btn_pause'): + try: + self.main.btn_pause.clicked.connect(self.main.pause_playback) + print("[PLAYBACK][Controls2D] Wired btn_pause -> pause_playback") + except Exception as e: + print(f"[PLAYBACK][Controls2D] ERROR wiring btn_pause: {e}") + else: + print("[PLAYBACK][Controls2D] btn_pause not found on main") + + if hasattr(self.main, 'sb_fps'): + try: + self.main.sb_fps.valueChanged.connect(self.main.on_fps_changed) + print("[PLAYBACK][Controls2D] Wired sb_fps -> on_fps_changed") + except Exception as e: + print(f"[PLAYBACK][Controls2D] ERROR wiring sb_fps: {e}") + else: + print("[PLAYBACK][Controls2D] sb_fps not found on main") + except 
Exception as e: + try: + print(f"[PLAYBACK][Controls2D] ERROR in playback wiring block: {e}") + except Exception: + pass + + # Add ROI Stats label at the bottom of 2D controls once + if hasattr(self.main, 'layout_2d_controls_main') and not hasattr(self.main, 'roi_stats_label'): + try: + self.main.roi_stats_label = QLabel("ROI Stats: -") + self.main.roi_stats_label.setStyleSheet("color: #2c3e50; font-size: 11px;") + hbox = QHBoxLayout() + hbox.addWidget(self.main.roi_stats_label) + self.main.layout_2d_controls_main.addLayout(hbox) + self._roi_stats_label_added = True + except Exception: + pass + except Exception as e: + try: + self.main.update_status(f"Error setting up 2D connections: {e}") + except Exception: + pass diff --git a/viewer/controls/controls_3d.py b/viewer/controls/controls_3d.py new file mode 100644 index 0000000..43d09cc --- /dev/null +++ b/viewer/controls/controls_3d.py @@ -0,0 +1,47 @@ +""" +3D Controls wiring for Workbench and other viewers. +Encapsulates signal connections for 3D-specific UI elements. +""" + +from typing import Optional + + +class Controls3D: + def __init__(self, main_window): + self.main = main_window + + def setup(self) -> None: + """Wire up 3D controls to main window handlers.""" + try: + # Load data button + if hasattr(self.main, 'btn_load_3d_data'): + self.main.btn_load_3d_data.clicked.connect(self.main.load_3d_data) + + # Colormap selection + if hasattr(self.main, 'cb_colormap_3d'): + self.main.cb_colormap_3d.currentTextChanged.connect(self.main.on_3d_colormap_changed) + + # Visibility checkboxes + if hasattr(self.main, 'cb_show_volume'): + self.main.cb_show_volume.toggled.connect(self.main.toggle_3d_volume) + if hasattr(self.main, 'cb_show_slice'): + self.main.cb_show_slice.toggled.connect(self.main.toggle_3d_slice) + if hasattr(self.main, 'cb_show_pointer'): + self.main.cb_show_pointer.toggled.connect(self.main.toggle_3d_pointer) + + # Intensity spinboxes + if hasattr(self.main, 'sb_min_intensity_3d'): + self.main.sb_min_intensity_3d.editingFinished.connect(self.main.update_3d_intensity) + if hasattr(self.main, 'sb_max_intensity_3d'): + self.main.sb_max_intensity_3d.editingFinished.connect(self.main.update_3d_intensity) + + # Slice controls + if hasattr(self.main, 'cb_slice_orientation'): + self.main.cb_slice_orientation.currentTextChanged.connect(self.main.change_slice_orientation) + if hasattr(self.main, 'btn_reset_slice'): + self.main.btn_reset_slice.clicked.connect(self.main.reset_3d_slice) + except Exception as e: + try: + self.main.update_status(f"Error setting up 3D connections: {e}") + except Exception: + pass diff --git a/viewer/documentation/dialog.py b/viewer/documentation/dialog.py new file mode 100644 index 0000000..4f76af7 --- /dev/null +++ b/viewer/documentation/dialog.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python3 +""" +Reusable Documentation Dialog (viewer/documentation/dialog.py) + +Provides a simple dialog to display viewer-specific documentation. +- Prefers rendering local HTML files (index.html) using QWebEngineView if available +- Falls back to QTextBrowser with basic HTML rendering +- Optionally converts Markdown (README.md) to HTML if the 'markdown' package is installed +- Auto-discovers documentation paths relative to the viewer's module file + +Expected doc locations per viewer module: + /doc/index.html (preferred) + /doc/README.md (fallback, converted to HTML if possible) + +Viewers may override discovery by setting an attribute 'doc_path' on their window +(e.g., self.doc_path = "/absolute/or/relative/path/to/index.html"). 
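+
+A minimal usage sketch (hypothetical 'viewer' window; the doc_path override
+is optional):
+
+    dlg = DocumentationDialog(parent=viewer)
+    viewer.doc_path = "viewer/workbench/doc"  # a directory implies index.html inside
+    dlg.open_for_viewer(viewer)
+    dlg.show()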
+""" + +from pathlib import Path +import inspect + +from PyQt5.QtWidgets import QDialog, QVBoxLayout +from PyQt5.QtCore import QUrl + +# Try to use QWebEngineView for full HTML support (CSS/JS). Fallback to QTextBrowser. +try: + from PyQt5.QtWebEngineWidgets import QWebEngineView # type: ignore + WEBENGINE_AVAILABLE = True +except Exception: + from PyQt5.QtWidgets import QTextBrowser + QWebEngineView = None # type: ignore + WEBENGINE_AVAILABLE = False + +# Optional markdown support +try: + import markdown # type: ignore + MARKDOWN_AVAILABLE = True +except Exception: + markdown = None # type: ignore + MARKDOWN_AVAILABLE = False + + +class DocumentationDialog(QDialog): + """Dialog to render documentation content (HTML or converted Markdown).""" + + def __init__(self, parent=None): + super().__init__(parent) + self.setWindowTitle("Documentation") + self.resize(900, 700) + + layout = QVBoxLayout(self) + if WEBENGINE_AVAILABLE: + self.view = QWebEngineView(self) + else: + # Basic HTML rendering (no JS/CSS external files) using QTextBrowser + self.view = QTextBrowser(self) # type: ignore + layout.addWidget(self.view) + + def set_content_html(self, html: str) -> None: + """Render raw HTML in the dialog.""" + if WEBENGINE_AVAILABLE: + self.view.setHtml(html) # QWebEngineView + else: + # QTextBrowser + self.view.setHtml(html) + + def load_html_file(self, file_path: str) -> None: + """Load and render a local HTML file by path.""" + p = Path(file_path) + if WEBENGINE_AVAILABLE: + # Load file via URL for QWebEngineView + url = QUrl.fromLocalFile(str(p.resolve())) + self.view.setUrl(url) + else: + # Read file and render HTML as text + try: + html = p.read_text(encoding="utf-8") + except Exception: + html = f"
<html><body><h3>Unable to read HTML file:</h3><p>{p}</p></body></html>
" + self.view.setHtml(html) + + @staticmethod + def _discover_doc_candidates_for(viewer_widget) -> dict: + """ + Return a dict of candidate documentation sources for a viewer widget: + { + 'index_html': Path | None, + 'readme_md': Path | None, + } + """ + # Allow explicit override via attribute on the viewer + try: + override = getattr(viewer_widget, 'doc_path', None) + if override: + override_path = Path(override) + # If override points to a directory, assume index.html inside + if override_path.is_dir(): + override_path = override_path / 'index.html' + return { + 'index_html': override_path if override_path.suffix.lower() == '.html' else None, + 'readme_md': override_path if override_path.suffix.lower() in ('.md', '.markdown') else None, + } + except Exception: + pass + + # Discover relative to the viewer's class/module file + try: + mod_file = inspect.getfile(viewer_widget.__class__) + except Exception: + mod_file = None + index_html = None + readme_md = None + if mod_file: + mod_dir = Path(mod_file).parent + doc_dir = mod_dir / 'doc' + idx = doc_dir / 'index.html' + md = doc_dir / 'README.md' + if idx.exists(): + index_html = idx + if md.exists(): + readme_md = md + return {'index_html': index_html, 'readme_md': readme_md} + + def open_for_viewer(self, viewer_widget) -> None: + """ + Auto-discover documentation next to the viewer module and display it. + Preference order: + 1. /doc/index.html (full HTML) + 2. /doc/README.md (converted to HTML if markdown available) + 3. Fallback placeholder HTML + """ + candidates = self._discover_doc_candidates_for(viewer_widget) + idx = candidates.get('index_html') + md = candidates.get('readme_md') + + if idx is not None: + self.load_html_file(str(idx)) + return + + if md is not None: + try: + md_text = Path(md).read_text(encoding='utf-8') + except Exception: + md_text = f"# Documentation\n\nUnable to read file: {md}" + if MARKDOWN_AVAILABLE: + html = markdown.markdown(md_text, extensions=[ + 'fenced_code', 'tables', 'toc' + ]) + else: + # Minimal conversion: wrap in
<pre> if markdown not available
+                html = f"<pre>{md_text}</pre>"
+            # Basic styling
+            styled_html = (
+                "<html><head><style>body { font-family: sans-serif; }</style></head><body>" + html + "</body></html>"
+            )
+            self.set_content_html(styled_html)
+            return
+
+        # Fallback: show a helpful placeholder
+        viewer_name = getattr(viewer_widget, 'viewer_name', viewer_widget.__class__.__name__)
+        placeholder = f"""
+        <html>
+        <body>
+        <h2>{viewer_name} Documentation</h2>
+        <p>No documentation found.</p>
+        <p>Create one of the following files next to the viewer module:</p>
+        <ul>
+            <li>doc/index.html (preferred)</li>
+            <li>doc/README.md (fallback, converted to HTML)</li>
+        </ul>
+        <p>Tip: Put files under the module's directory, e.g., viewer/workbench/doc/index.html</p>
+        </body>
+        </html>
+ + + """ + self.set_content_html(placeholder) diff --git a/viewer/hkl_3d_slice_window.py b/viewer/hkl_3d_slice_window.py new file mode 100644 index 0000000..c9eaef1 --- /dev/null +++ b/viewer/hkl_3d_slice_window.py @@ -0,0 +1,1140 @@ +import sys +import numpy as np +import pyvista as pyv +from pyvistaqt import QtInteractor +from PyQt5 import uic +from PyQt5.QtCore import Qt +from PyQt5.QtWidgets import ( + QApplication, + QMainWindow, + QFileDialog, + QMessageBox, + QSizePolicy, +) +import pathlib +sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) +from utils import SizeManager, RSMConverter +from utils.hdf5_loader import HDF5Loader + + +class HKL3DSliceWindow(QMainWindow): + """3D Slice window (point-only viewer). + + Point-only rendering with plane slicing: + - No volume/grid interpolation; pure point-cloud rendering + - Plane-based slice using a tolerance around the plane + - Axes, intensity range sliders, camera presets, colormap selection + - Data loading via RSMConverter.load_h5_to_3d + - Extract slice saves slice points projected to a 2D slice dataset + """ + + def __init__(self, parent=None): + super(HKL3DSliceWindow, self).__init__() + self.parent = parent + uic.loadUi('gui/hkl_3d_slice_window.ui', self) + + # Initial UI availability + try: + self.actionSave.setEnabled(False) # volume-save removed + self.actionExtractSlice.setEnabled(False) + self._set_slice_controls_enabled(False) + except Exception: + pass + self.setWindowTitle('3D Slice') + pyv.set_plot_theme('dark') + + # Hook up controls + try: + if hasattr(self, 'actionSlice'): + self.actionSlice.triggered.connect(lambda: self.open_controls_dialog(focus='slice')) + except Exception: + pass + + # Toggles and controls (align naming/behavior with viewer/hkl_3d.py) + self.cbToggleSlicePointer.clicked.connect(self.toggle_pointer) + if hasattr(self, 'cbTogglePoints'): + try: + self.cbTogglePoints.clicked.connect(self.toggle_cloud_vol) + except Exception: + pass + if hasattr(self, 'cbLockSlice'): + try: + self.cbLockSlice.clicked.connect(self.toggle_slice_lock) + except Exception: + pass + self.cbColorMapSelect.currentIndexChanged.connect(self.change_color_map) + self.sbMinIntensity.editingFinished.connect(self.update_intensity) + self.sbMaxIntensity.editingFinished.connect(self.update_intensity) + + # Actions + self.actionLoadData.triggered.connect(self.load_data) + self.actionExtractSlice.triggered.connect(self.extract_slice) + + # State + self.cloud_mesh = None + # Actor handles follow naming used in viewer/hkl_3d.py + self.points_actor = None # actor for the full cloud (name: "cloud_volume") + self.slab = None # extracted slice points + self.slab_actor = None # actor for slice points (name: "slab_points") + self._plane_widget = None + self._slice_locked = False + self._plane_normal = None + self._plane_origin = None + self._slice_lock_text_actor = None + self.orig_shape = (0, 0) + self.curr_shape = (0, 0) + self.num_images = 0 + self.current_file_path = None + + # Slice/camera state + self._slice_translate_step = 0.01 + self._slice_rotate_step_deg = 1.0 + self._zoom_step = 1.5 + self._cam_pos_selection = None + self._slice_orientation_selection = None + self._custom_normal = [0.0, 0.0, 1.0] + + # LUTs + self.lut = pyv.LookupTable(cmap='jet') + self.lut.apply_opacity([0, 1]) + self.lut2 = pyv.LookupTable(cmap='jet') + self.lut2.apply_opacity([0, 1]) + + # Plotter + self.plotter = QtInteractor() + try: + self.plotter.add_axes(xlabel='H', ylabel='K', zlabel='L', x_color='red', y_color='green', z_color='blue') + 
except Exception: + self.plotter.add_axes(xlabel='H', ylabel='K', zlabel='L') + self.plotter.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) + self.plotter.setMinimumSize(300, 300) + self.viewer_3d_slicer_layout.addWidget(self.plotter, 1, 1) + + # Wire slice/camera controls from UI + if hasattr(self, 'sbSliceTranslateStep'): + self.sbSliceTranslateStep.valueChanged.connect(self._on_translate_step_changed) + if hasattr(self, 'sbSliceRotateStep'): + self.sbSliceRotateStep.valueChanged.connect(self._on_rotate_step_changed) + if hasattr(self, 'cbSliceOrientation'): + self.cbSliceOrientation.currentIndexChanged.connect(self._on_orientation_changed) + if hasattr(self, 'sbNormH'): + self.sbNormH.editingFinished.connect(self._on_custom_normal_changed) + if hasattr(self, 'sbNormK'): + self.sbNormK.editingFinished.connect(self._on_custom_normal_changed) + if hasattr(self, 'sbNormL'): + self.sbNormL.editingFinished.connect(self._on_custom_normal_changed) + + if hasattr(self, 'btnSliceUpNormal'): + self.btnSliceUpNormal.clicked.connect(lambda: self.nudge_along_normal(+1)) + if hasattr(self, 'btnSliceDownNormal'): + self.btnSliceDownNormal.clicked.connect(lambda: self.nudge_along_normal(-1)) + if hasattr(self, 'btnSlicePosH'): + self.btnSlicePosH.clicked.connect(lambda: self.nudge_along_axis('H', +1)) + if hasattr(self, 'btnSliceNegH'): + self.btnSliceNegH.clicked.connect(lambda: self.nudge_along_axis('H', -1)) + if hasattr(self, 'btnSlicePosK'): + self.btnSlicePosK.clicked.connect(lambda: self.nudge_along_axis('K', +1)) + if hasattr(self, 'btnSliceNegK'): + self.btnSliceNegK.clicked.connect(lambda: self.nudge_along_axis('K', -1)) + if hasattr(self, 'btnSlicePosL'): + self.btnSlicePosL.clicked.connect(lambda: self.nudge_along_axis('L', +1)) + if hasattr(self, 'btnSliceNegL'): + self.btnSliceNegL.clicked.connect(lambda: self.nudge_along_axis('L', -1)) + if hasattr(self, 'btnRotPlusH'): + self.btnRotPlusH.clicked.connect(lambda: self.rotate_about_axis('H', +self._slice_rotate_step_deg)) + if hasattr(self, 'btnRotMinusH'): + self.btnRotMinusH.clicked.connect(lambda: self.rotate_about_axis('H', -self._slice_rotate_step_deg)) + if hasattr(self, 'btnRotPlusK'): + self.btnRotPlusK.clicked.connect(lambda: self.rotate_about_axis('K', +self._slice_rotate_step_deg)) + if hasattr(self, 'btnRotMinusK'): + self.btnRotMinusK.clicked.connect(lambda: self.rotate_about_axis('K', -self._slice_rotate_step_deg)) + if hasattr(self, 'btnRotPlusL'): + self.btnRotPlusL.clicked.connect(lambda: self.rotate_about_axis('L', +self._slice_rotate_step_deg)) + if hasattr(self, 'btnRotMinusL'): + self.btnRotMinusL.clicked.connect(lambda: self.rotate_about_axis('L', -self._slice_rotate_step_deg)) + if hasattr(self, 'btnResetSlice'): + self.btnResetSlice.clicked.connect(self._on_reset_slice) + + # Dialogs + def open_controls_dialog(self, focus=None): + try: + if hasattr(self, 'controls_dialog') and self.controls_dialog is not None and self.controls_dialog.isVisible(): + try: + self.controls_dialog.raise_() + except Exception: + pass + try: + self.controls_dialog.activateWindow() + except Exception: + pass + if focus == 'camera': + try: + self.controls_dialog.focus_camera_section() + except Exception: + pass + elif focus == 'slice': + try: + self.controls_dialog.focus_slice_section() + except Exception: + pass + return + except Exception: + pass + try: + from viewer.hkl_controls_dialog import HKLControlsDialog + self.controls_dialog = HKLControlsDialog(self) + if focus == 'camera': + try: + 
self.controls_dialog.focus_camera_section() + except Exception: + pass + elif focus == 'slice': + try: + self.controls_dialog.focus_slice_section() + except Exception: + pass + self.controls_dialog.show() + except Exception: + pass + + # Availability + def _is_data_loaded(self) -> bool: + return bool(self.cloud_mesh is not None) + + def _slice_points_exist(self) -> bool: + try: + return (self.slab is not None) and (getattr(self.slab, 'n_points', 0) > 0) + except Exception: + return False + + def _set_slice_controls_enabled(self, enabled: bool): + try: + for wname in ('gbSteps', 'gbOrientation', 'gbTranslate', 'gbRotate'): + w = getattr(self, wname, None) + if w: + w.setEnabled(bool(enabled)) + if hasattr(self, 'tabsControls') and hasattr(self, 'tabSlice'): + try: + idx = self.tabsControls.indexOf(self.tabSlice) + self.tabsControls.setTabEnabled(idx, bool(enabled)) + except Exception: + pass + except Exception: + pass + + def _refresh_availability(self): + try: + data_loaded = self._is_data_loaded() + slice_points_exist = self._slice_points_exist() + if hasattr(self, 'actionExtractSlice'): + self.actionExtractSlice.setEnabled(bool(slice_points_exist)) + self._set_slice_controls_enabled(bool(data_loaded)) + except Exception: + pass + + # Data setup and create scene + def setup_3d_cloud(self, cloud, intensity, shape): + if cloud is None or (isinstance(cloud, np.ndarray) and cloud.size == 0): + self.cloud_mesh = None + return False + if isinstance(cloud, np.ndarray): + self.cloud_mesh = pyv.PolyData(cloud) + self.cloud_mesh['intensity'] = intensity + else: + self.cloud_mesh = cloud.copy(deep=True) if hasattr(cloud, 'copy') else cloud + self.cloud_mesh['intensity'] = intensity + self.orig_shape = shape + self.curr_shape = shape + return True + + def create_3D(self, cloud=None, intensity=None): + # Clear previous actors/widgets but keep axes stable + try: + self.plotter.clear() + for name in ("cloud_volume", "slab_points", "origin_sphere", "normal_line"): + if name in getattr(self.plotter, 'actors', {}): + self.plotter.remove_actor(name, reset_camera=False) + except Exception: + pass + + # Cloud + self.cloud_mesh = pyv.PolyData(cloud) + self.cloud_mesh['intensity'] = intensity + self.lut.scalar_range = (float(np.min(intensity)), float(np.max(intensity))) + self.lut2.scalar_range = (float(np.min(intensity)), float(np.max(intensity))) + + # Points actor (use naming "cloud_volume" like viewer/hkl_3d.py) + self.points_actor = self.plotter.add_mesh( + self.cloud_mesh, + scalars='intensity', + cmap=self.lut, + point_size=5.0, + name='cloud_volume', + reset_camera=False, + show_edges=False, + show_scalar_bar=True, + ) + + # Bounds/axes + try: + self.plotter.show_bounds( + mesh=self.cloud_mesh, + xtitle='H Axis', ytitle='K Axis', ztitle='L Axis', + ticks='inside', minor_ticks=True, + n_xlabels=7, n_ylabels=7, n_zlabels=7, + x_color='red', y_color='green', z_color='blue', + font_size=20, + ) + except Exception: + try: + self.plotter.show_bounds(mesh=self.cloud_mesh, xtitle='H Axis', ytitle='K Axis', ztitle='L Axis') + except Exception: + pass + + # Plane widget + slice_normal = (0, 0, 1) + slice_origin = self.cloud_mesh.center + self._plane_widget = self.plotter.add_plane_widget( + callback=self.on_plane_update, + normal=slice_normal, + origin=slice_origin, + bounds=self.cloud_mesh.bounds, + factor=1.0, + implicit=True, + assign_to_axis=None, + tubing=False, + origin_translation=True, + outline_opacity=0, + ) + # Initialize stored plane state + try: + self._plane_normal = np.array(slice_normal, 
dtype=float) + self._plane_origin = np.array(slice_origin, dtype=float) + except Exception: + self._plane_normal = np.array([0.0, 0.0, 1.0], dtype=float) + self._plane_origin = np.array(self.cloud_mesh.center, dtype=float) if self.cloud_mesh is not None else np.array([0.0, 0.0, 0.0], dtype=float) + + # Ensure slice lock overlay exists and is hidden initially + try: + self._ensure_slice_lock_text_actor() + if self._slice_lock_text_actor is not None: + self._slice_lock_text_actor.SetVisibility(False) + except Exception: + pass + + # Labels and sliders + try: + self.lbCurrentPointSizeNum.setText(str(len(cloud))) + self.lbCurrentResolutionX.setText(str(self.curr_shape[0])) + self.lbCurrentResolutionY.setText(str(self.curr_shape[1])) + except Exception: + pass + try: + imin, imax = int(np.min(intensity)), int(np.max(intensity)) + self.sbMinIntensity.setRange(imin, imax) + self.sbMinIntensity.setValue(imin) + self.sbMaxIntensity.setRange(imin, imax) + self.sbMaxIntensity.setValue(imax) + except Exception: + pass + + self.update_intensity() + try: + self.update_info_slice_labels() + self._refresh_availability() + except Exception: + pass + + # Camera + try: + self.plotter.set_focus(self.cloud_mesh.center) + self.plotter.reset_camera() + except Exception: + pass + + def _remove_plane_widget(self): + """Safely remove existing plane widget (if any).""" + try: + if self._plane_widget is not None: + try: + self._plane_widget.EnabledOff() + except Exception: + pass + try: + self.plotter.clear_plane_widgets() + except Exception: + pass + self._plane_widget = None + except Exception: + pass + + # Loading + def load_data(self): + file_name, _ = QFileDialog.getOpenFileName(self, 'Select an HDF5 File', '', 'HDF5 Files (*.h5 *.hdf5);;All Files (*)') + if not file_name: + try: + QMessageBox.warning(self, 'File', 'No Valid File Selected') + except Exception: + pass + return + self.current_file_path = file_name + # reflect file path in UI if present + try: + if hasattr(self, 'leFilePathStr') and self.leFilePathStr is not None: + self.leFilePathStr.setText(file_name) + except Exception: + pass + + original_title = self.windowTitle() + self.setEnabled(False) + self.setWindowTitle(f"{original_title} ***** Loading...") + QApplication.setOverrideCursor(Qt.WaitCursor) + QApplication.processEvents() + + # Hard reset interactive widgets and prior actors + self._remove_plane_widget() + try: + for name in ("cloud_volume", "slab_points", "origin_sphere", "normal_line"): + if name in getattr(self.plotter, 'actors', {}): + self.plotter.remove_actor(name, reset_camera=False) + except Exception: + pass + try: + conv = RSMConverter() + points, intensities, num_images, shape = conv.load_h5_to_3d(file_name) + if points.size == 0 or intensities.size == 0: + QMessageBox.warning(self, 'Loading Warning', 'No valid point data found in HDF5 file') + return + if self.setup_3d_cloud(points, intensities, shape): + self.num_images = num_images + self.create_3D(cloud=points, intensity=intensities) + try: + self.groupBox3DViewer.setTitle(f'Viewing {num_images} Image(s)') + self.lbOriginalPointSizeNum.setText(str(len(points))) + self.lbOriginalResolutionX.setText(str(shape[0])) + self.lbOriginalResolutionY.setText(str(shape[1])) + # reflect current shape + self.curr_shape = shape + except Exception: + pass + try: + self.update_info_slice_labels() + self._refresh_availability() + except Exception: + pass + except Exception as e: + import traceback + error_msg = f"Error loading data: {str(e)}\n\nTraceback:\n{traceback.format_exc()}" + try: + 
QMessageBox.critical(self, 'Error Loading Data', error_msg) + except Exception: + pass + finally: + QApplication.restoreOverrideCursor() + self.setEnabled(True) + self.setWindowTitle(original_title) + + # Slice points extraction + def on_plane_update(self, normal, origin): + # If slice is locked, immediately restore widget to stored state and do not update slice + try: + if bool(getattr(self, '_slice_locked', False)): + self._restore_locked_plane_widget() + return + except Exception: + pass + if self.cloud_mesh is None: + return + normal = self.normalize_vector(np.array(normal, dtype=float)) + origin = np.array(origin, dtype=float) + + # Compute slice mask + vec = self.cloud_mesh.points - origin + dist = np.dot(vec, normal) + thickness = 0.002 + mask = np.abs(dist) < thickness + + # Extract masked points + try: + self.slab = self.cloud_mesh.extract_points(mask) + except Exception: + self.slab = None + + # Remove previous slice points actor if present + try: + if 'slab_points' in getattr(self.plotter, 'actors', {}): + self.plotter.remove_actor('slab_points', reset_camera=False) + except Exception: + pass + + # Add new slice points actor (name: slab_points) + if self.slab is not None and getattr(self.slab, 'n_points', 0) > 0: + self.slab_actor = self.plotter.add_mesh( + self.slab, + name='slab_points', + render_points_as_spheres=True, + point_size=10, + scalars='intensity', + cmap=self.lut2, + show_scalar_bar=False, + ) + else: + self.slab_actor = None + + # Sync plane widget + try: + if self._plane_widget is not None: + self._plane_widget.SetNormal(normal) + self._plane_widget.SetOrigin(origin) + except Exception: + pass + + # Update labels and render + try: + self.update_info_slice_labels() + if hasattr(self, 'lbInfoPointsCurrVal'): + try: + n_curr = int(getattr(self.slab, 'n_points', 0) or 0) + except Exception: + n_curr = 0 + self.lbInfoPointsCurrVal.setText(str(n_curr)) + except Exception: + pass + try: + self.plotter.render() + except Exception: + pass + self._refresh_availability() + + # Update stored plane state + try: + self._plane_normal = np.array(normal, dtype=float) + self._plane_origin = np.array(origin, dtype=float) + except Exception: + pass + + # Intensity and colormap updates + def update_intensity(self): + try: + min_i = self.sbMinIntensity.value() + max_i = self.sbMaxIntensity.value() + except Exception: + return + if min_i > max_i: + min_i, max_i = max_i, min_i + try: + self.sbMinIntensity.setValue(min_i) + self.sbMaxIntensity.setValue(max_i) + except Exception: + pass + new_range = (float(min_i), float(max_i)) + + # Update points actor + try: + if self.points_actor is not None: + self.points_actor.mapper.scalar_range = new_range + except Exception: + pass + # Update slice points actor + try: + if self.slab_actor is not None: + self.slab_actor.mapper.scalar_range = new_range + except Exception: + pass + # Update scalar bars + try: + if hasattr(self.plotter, 'scalar_bars'): + for _, sb in self.plotter.scalar_bars.items(): + if sb: + sb.GetLookupTable().SetTableRange(new_range[0], new_range[1]) + sb.Modified() + except Exception: + pass + try: + self.plotter.render() + except Exception: + pass + + # Maintain visibility based on toggles (match viewer/hkl_3d.py behavior) + try: + self.toggle_cloud_vol() + except Exception: + pass + try: + self.toggle_pointer() + except Exception: + pass + + def change_color_map(self): + color_map_select = self.cbColorMapSelect.currentText() + new_lut = pyv.LookupTable(cmap=color_map_select) + new_lut2 = pyv.LookupTable(cmap=color_map_select) 
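+        # Two LUT instances share the selected colormap: self.lut recolors the
+        # full cloud actor ('cloud_volume') while self.lut2 recolors the slice
+        # points ('slab_points'), so each actor keeps its own lookup table.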
+ new_lut.apply_opacity([0, 1]) + new_lut2.apply_opacity([0, 1]) + self.lut = new_lut + self.lut2 = new_lut2 + + try: + if self.points_actor is not None: + self.points_actor.mapper.lookup_table = self.lut + if self.slab_actor is not None: + self.slab_actor.mapper.lookup_table = self.lut2 + except Exception: + pass + try: + if hasattr(self.plotter, 'scalar_bars'): + for _, sb in self.plotter.scalar_bars.items(): + if sb: + sb.SetLookupTable(self.lut) + sb.Modified() + except Exception: + pass + try: + self.plotter.render() + except Exception: + pass + + # Toggles + def toggle_pointer(self): + """Toggle visibility of the slice points and plane widget (like viewer/hkl_3d.py).""" + vis = True + try: + vis = bool(self.cbToggleSlicePointer.isChecked()) + except Exception: + pass + try: + if 'slab_points' in getattr(self.plotter, 'actors', {}): + self.plotter.renderer._actors['slab_points'].SetVisibility(vis) + except Exception: + pass + try: + widgets = getattr(self.plotter, 'plane_widgets', []) + for pw in widgets or []: + try: + if vis: + pw.EnabledOn() + else: + pw.EnabledOff() + except Exception: + pass + except Exception: + pass + try: + self.plotter.render() + except Exception: + pass + + def toggle_cloud_vol(self): + """Toggle visibility of the full cloud actor (named 'cloud_volume').""" + vis = True + try: + # Use cbTogglePoints from this UI to drive cloud visibility + vis = bool(self.cbTogglePoints.isChecked()) + except Exception: + pass + try: + if 'cloud_volume' in getattr(self.plotter, 'actors', {}): + self.plotter.renderer._actors['cloud_volume'].SetVisibility(vis) + except Exception: + pass + try: + self.plotter.render() + except Exception: + pass + + def _ensure_slice_lock_text_actor(self): + try: + if self._slice_lock_text_actor is None: + self._slice_lock_text_actor = self.plotter.add_text( + "Slice Locked", + position='upper_left', + font_size=16, + color='white' + ) + except Exception: + self._slice_lock_text_actor = None + + def toggle_slice_lock(self): + # Update lock state + try: + self._slice_locked = bool(self.cbLockSlice.isChecked()) + except Exception: + self._slice_locked = not bool(getattr(self, '_slice_locked', False)) + # Ensure overlay exists + self._ensure_slice_lock_text_actor() + # Set overlay visibility + try: + if self._slice_lock_text_actor is not None: + self._slice_lock_text_actor.SetVisibility(bool(self._slice_locked)) + except Exception: + pass + # If we just locked, restore widget to stored plane state + if bool(self._slice_locked): + self._restore_locked_plane_widget() + try: + self.plotter.render() + except Exception: + pass + + def _restore_locked_plane_widget(self): + try: + if (self._plane_widget is not None) and (self._plane_normal is not None) and (self._plane_origin is not None): + self._plane_widget.SetNormal(np.array(self._plane_normal, dtype=float)) + self._plane_widget.SetOrigin(np.array(self._plane_origin, dtype=float)) + except Exception: + pass + + # (Removed) Volume toggle: no volume actor in point-only slice window + + # Extract slice (save slice points as 2D slice dataset) + def extract_slice(self): + if not self._slice_points_exist(): + try: + QMessageBox.warning(self, 'No Slice', 'No slice points available to extract') + except Exception: + pass + return + default_name = f"slice_extract_{np.datetime64('now').astype('datetime64[s]').astype(str).replace(':', '-')}.h5" + file_path, _ = QFileDialog.getSaveFileName(self, 'Save hkl Slice Data', default_name, 'HDF5 Files (*.h5 *.hdf5);;All Files (*)') + if not file_path: + return + + # 
Gather slice points and intensities + try: + # Use self.slab for consistency + slice_points = np.array(self.slab.points) + slice_intensities = np.array(self.slab['intensity']) + except Exception: + try: + QMessageBox.critical(self, 'Extract Error', 'Failed to read slice points/intensity') + except Exception: + pass + return + + # Plane state + normal, origin = self.get_plane_state() + + # Metadata + slice_metadata = { + 'data_type': 'slice', + 'slice_normal': list(map(float, self.normalize_vector(np.array(normal, dtype=float)))) , + 'slice_origin': list(map(float, np.array(origin, dtype=float))), + 'num_points': int(len(slice_points)), + 'original_file': str(self.current_file_path or 'unknown'), + 'original_shape': list(map(int, self.orig_shape)) if isinstance(self.orig_shape, (tuple, list)) else [0, 0], + 'extraction_timestamp': str(np.datetime64('now')), + } + + # Save via HDF5Loader + try: + loader = HDF5Loader() + success = loader.extract_slice( + file_path=file_path, + points=slice_points, + intensities=slice_intensities, + metadata=slice_metadata, + shape=self.orig_shape if isinstance(self.orig_shape, (tuple, list)) else None, + ) + if success: + try: + QMessageBox.information(self, 'Success', f'Slice extracted and saved successfully!\n{len(slice_points)} points saved.') + except Exception: + pass + else: + try: + QMessageBox.critical(self, 'Error', f'Failed to save slice: {loader.get_last_error()}') + except Exception: + pass + except Exception as e: + try: + QMessageBox.critical(self, 'Extract Error', f'Error extracting slice: {str(e)}') + except Exception: + pass + + # Camera controls + def zoom_in(self): + cam = self.plotter.camera + try: + step = float(self._zoom_step) + if not np.isfinite(step) or step <= 1.0: + step = 1.5 + except Exception: + step = 1.5 + cam.zoom(step) + self.plotter.render() + + def zoom_out(self): + cam = self.plotter.camera + try: + step = float(self._zoom_step) + if not np.isfinite(step) or step <= 1.0: + step = 1.5 + except Exception: + step = 1.5 + cam.zoom(1.0 / step) + self.plotter.render() + + def reset_camera(self): + self.plotter.reset_camera() + self.plotter.render() + + def set_camera_position(self): + pos_src = getattr(self, '_cam_pos_selection', None) + if not pos_src: + try: + if hasattr(self, 'cbSetCamPos') and self.cbSetCamPos is not None: + pos_src = self.cbSetCamPos.currentText() + elif hasattr(self, 'camSetPosCombo') and self.camSetPosCombo is not None: + pos_src = self.camSetPosCombo.currentText() + except Exception: + pass + pos_text = (pos_src or '').strip().lower() + p = self.plotter + cam = getattr(p, 'camera', None) + + def _set_focus_to_data_center(): + try: + if self.cloud_mesh is not None and hasattr(self.cloud_mesh, 'center'): + p.set_focus(self.cloud_mesh.center) + except Exception: + pass + + if ('xy' in pos_text) or ('hk' in pos_text): + _set_focus_to_data_center(); p.view_xy() + elif ('yz' in pos_text) or ('kl' in pos_text): + _set_focus_to_data_center(); p.view_yz() + elif ('xz' in pos_text) or ('hl' in pos_text): + _set_focus_to_data_center(); p.view_xz() + elif 'iso' in pos_text: + _set_focus_to_data_center() + try: + p.view_isometric() + except Exception: + try: + p.view_vector((1.0, 1.0, 1.0)) + if cam is not None: + cam.view_up = (0.0, 0.0, 1.0) + except Exception: + pass + else: + _set_focus_to_data_center() + label = (pos_text or '') + try: + if ('h+' in label) or ('x+' in label): + p.view_vector((1.0, 0.0, 0.0)); cam.view_up = (0.0, 0.0, 1.0) + elif ('h-' in label) or ('x-' in label): + p.view_vector((-1.0, 0.0, 
0.0)); cam.view_up = (0.0, 0.0, 1.0) + elif ('k+' in label) or ('y+' in label): + p.view_vector((0.0, 1.0, 0.0)); cam.view_up = (0.0, 0.0, 1.0) + elif ('k-' in label) or ('y-' in label): + p.view_vector((0.0, -1.0, 0.0)); cam.view_up = (0.0, 0.0, 1.0) + elif ('l+' in label) or ('z+' in label): + p.view_vector((0.0, 0.0, 1.0)); cam.view_up = (0.0, 1.0, 0.0) + elif ('l-' in label) or ('z-' in label): + p.view_vector((0.0, 0.0, -1.0)); cam.view_up = (0.0, 1.0, 0.0) + except Exception: + pass + try: + if cam is not None and hasattr(cam, 'orthogonalize_view_up'): + cam.orthogonalize_view_up() + except Exception: + pass + try: + p.render() + except Exception: + pass + + def _apply_cam_preset_button(self, label: str): + try: + self._cam_pos_selection = label + except Exception: + pass + try: + self.set_camera_position() + except Exception: + try: + if 'hk' in label.lower() or 'xy' in label.lower(): + self.plotter.view_xy() + elif 'kl' in label.lower() or 'yz' in label.lower(): + self.plotter.view_yz() + elif 'hl' in label.lower() or 'xz' in label.lower(): + self.plotter.view_xz() + self.plotter.render() + except Exception: + pass + + def view_slice_normal(self): + try: + normal, origin = self.get_plane_state() + normal = self.normalize_vector(np.array(normal, dtype=float)) + origin = np.array(origin, dtype=float) + cam = getattr(self.plotter, 'camera', None) + if cam is None: + return + try: + rng = self.cloud_mesh.points.max(axis=0) - self.cloud_mesh.points.min(axis=0) + distance = float(np.linalg.norm(rng)) * 0.5 + except Exception: + distance = 1.0 + try: + cam.focal_point = origin.tolist() + cam.position = (origin + normal * distance).tolist() + up = np.array(getattr(cam, 'view_up', [0.0, 1.0, 0.0]), dtype=float) + upn = self.normalize_vector(up) + if abs(float(np.dot(upn, normal))) > 0.99: + new_up = np.array([0.0, 1.0, 0.0], dtype=float) if abs(normal[1]) < 0.99 else np.array([1.0, 0.0, 0.0], dtype=float) + cam.view_up = new_up.tolist() + except Exception: + pass + self.plotter.render() + except Exception: + pass + + # Slice control helpers + def _on_translate_step_changed(self, val: float): + self._slice_translate_step = float(val) + + def _on_rotate_step_changed(self, val: float): + self._slice_rotate_step_deg = float(val) + + def _on_orientation_changed(self, idx: int): + if not self._ensure_data_loaded_or_warn(): + return + preset = getattr(self, '_slice_orientation_selection', None) + if not preset: + try: + preset = self.cbSliceOrientation.currentText() + except Exception: + preset = 'HK(xy)' + self.set_plane_preset(preset) + + def _on_custom_normal_changed(self): + # Guard: do nothing if slice is locked + if bool(getattr(self, '_slice_locked', False)): + self._restore_locked_plane_widget() + return + if not self._ensure_data_loaded_or_warn(): + return + preset = (str(getattr(self, '_slice_orientation_selection', '')) or '').lower() + if preset.startswith('custom'): + try: + n_raw = np.array(getattr(self, '_custom_normal', [0.0, 0.0, 1.0]), dtype=float) + except Exception: + n_raw = np.array([0.0, 0.0, 1.0], dtype=float) + n = self.normalize_vector(n_raw) + _, origin = self.get_plane_state() + self.on_plane_update(n, origin) + + def _on_reset_slice(self): + # Guard: do nothing if slice is locked + if bool(getattr(self, '_slice_locked', False)): + self._restore_locked_plane_widget() + return + if not self._ensure_data_loaded_or_warn(): + return + try: + center = self.cloud_mesh.center if (self.cloud_mesh is not None) else np.array([0.0, 0.0, 0.0]) + normal = np.array([0.0, 0.0, 1.0], 
dtype=float) + self.on_plane_update(normal, center) + except Exception: + pass + + # Plane helpers + def get_plane_state(self): + # Prefer stored state once initialized for consistency under lock + try: + if (self._plane_normal is not None) and (self._plane_origin is not None): + return np.array(self._plane_normal, dtype=float), np.array(self._plane_origin, dtype=float) + except Exception: + pass + try: + if hasattr(self.plotter, 'plane_widgets') and self.plotter.plane_widgets: + pw = self.plotter.plane_widgets[0] + normal = np.array(pw.GetNormal(), dtype=float) + origin = np.array(pw.GetOrigin(), dtype=float) + return normal, origin + except Exception: + pass + normal = np.array([0.0, 0.0, 1.0], dtype=float) + try: + origin = np.array(self.cloud_mesh.center, dtype=float) + except Exception: + origin = np.array([0.0, 0.0, 0.0], dtype=float) + return normal, origin + + def set_plane_state(self, normal, origin): + # Guard: do nothing if slice is locked + if bool(getattr(self, '_slice_locked', False)): + self._restore_locked_plane_widget() + return + n = self.normalize_vector(np.array(normal, dtype=float)) + o = np.array(origin, dtype=float) + self.on_plane_update(n, o) + + def normalize_vector(self, v): + v = np.array(v, dtype=float) + n = float(np.linalg.norm(v)) + if not np.isfinite(n) or n <= 0.0: + return np.array([0.0, 0.0, 1.0], dtype=float) + return v / n + + def set_plane_preset(self, preset_text: str): + # Guard: do nothing if slice is locked + if bool(getattr(self, '_slice_locked', False)): + self._restore_locked_plane_widget() + return + preset = preset_text.lower() + if 'xy' in preset or 'hk' in preset: + n = np.array([0.0, 0.0, 1.0], dtype=float) + elif 'yz' in preset or 'kl' in preset: + n = np.array([1.0, 0.0, 0.0], dtype=float) + elif 'xz' in preset or 'hl' in preset: + n = np.array([0.0, 1.0, 0.0], dtype=float) + else: + try: + n = np.array(getattr(self, '_custom_normal', [0.0, 0.0, 1.0]), dtype=float) + except Exception: + n = np.array([0.0, 0.0, 1.0], dtype=float) + n = self.normalize_vector(n) + _, origin = self.get_plane_state() + self.set_plane_state(n, origin) + + def nudge_along_normal(self, sign: int): + # Guard: do nothing if slice is locked + if bool(getattr(self, '_slice_locked', False)): + self._restore_locked_plane_widget() + return + if not self._ensure_data_loaded_or_warn(): + return + normal, origin = self.get_plane_state() + step = float(self._slice_translate_step) + origin_new = origin + float(sign) * step * normal + self.set_plane_state(normal, origin_new) + + def nudge_along_axis(self, axis: str, sign: int): + # Guard: do nothing if slice is locked + if bool(getattr(self, '_slice_locked', False)): + self._restore_locked_plane_widget() + return + if not self._ensure_data_loaded_or_warn(): + return + axis = axis.upper() + if axis == 'H': + d = np.array([1.0, 0.0, 0.0], dtype=float) + elif axis == 'K': + d = np.array([0.0, 1.0, 0.0], dtype=float) + else: + d = np.array([0.0, 0.0, 1.0], dtype=float) + normal, origin = self.get_plane_state() + step = float(self._slice_translate_step) + origin_new = origin + float(sign) * step * d + self.set_plane_state(normal, origin_new) + + def rotate_about_axis(self, axis: str, deg: float): + # Guard: do nothing if slice is locked + if bool(getattr(self, '_slice_locked', False)): + self._restore_locked_plane_widget() + return + if not self._ensure_data_loaded_or_warn(): + return + axis = axis.upper() + if axis == 'H': + u = np.array([1.0, 0.0, 0.0], dtype=float) + elif axis == 'K': + u = np.array([0.0, 1.0, 0.0], 
dtype=float) + else: + u = np.array([0.0, 0.0, 1.0], dtype=float) + normal, origin = self.get_plane_state() + theta = float(np.deg2rad(deg)) + ux, uy, uz = u + c, s = np.cos(theta), np.sin(theta) + R = np.array([ + [c+ux*ux*(1-c), ux*uy*(1-c)-uz*s, ux*uz*(1-c)+uy*s], + [uy*ux*(1-c)+uz*s, c+uy*uy*(1-c), uy*uz*(1-c)-ux*s], + [uz*ux*(1-c)-uy*s, uz*uy*(1-c)+ux*s, c+uz*uz*(1-c)] + ], dtype=float) + new_normal = R @ normal + new_normal = self.normalize_vector(new_normal) + self.set_plane_state(new_normal, origin) + + # Info labels + def update_info_slice_labels(self): + try: + orient_text = getattr(self, '_slice_orientation_selection', None) + if not orient_text: + try: + if hasattr(self, 'cbSliceOrientation') and self.cbSliceOrientation is not None: + orient_text = self.cbSliceOrientation.currentText() + except Exception: + orient_text = '-' + if orient_text is None or orient_text == '': + orient_text = '-' + normal, origin = self.get_plane_state() + n = self.normalize_vector(np.array(normal, dtype=float)) + o = np.array(origin, dtype=float) + # Display floats with 5 decimal places + n_str = f"[{n[0]:0.5f}, {n[1]:0.5f}, {n[2]:0.5f}]" + o_str = f"[{o[0]:0.5f}, {o[1]:0.5f}, {o[2]:0.5f}]" + try: + if hasattr(self, 'lbSliceOrientationVal'): + self.lbSliceOrientationVal.setText(str(orient_text)) + except Exception: + pass + try: + if hasattr(self, 'lbSliceNormalVal'): + self.lbSliceNormalVal.setText(n_str) + except Exception: + pass + try: + if hasattr(self, 'lbSliceOriginVal'): + self.lbSliceOriginVal.setText(o_str) + except Exception: + pass + try: + pos_text = '-' + orient_lower = (str(orient_text) or '').lower() + if ('hk' in orient_lower) or ('xy' in orient_lower): + pos_text = f"L = {o[2]:0.5f}" + elif ('kl' in orient_lower) or ('yz' in orient_lower): + pos_text = f"H = {o[0]:0.5f}" + elif ('hl' in orient_lower) or ('xz' in orient_lower): + pos_text = f"K = {o[1]:0.5f}" + else: + s = float(np.dot(n, o)) + pos_text = f"n·origin = {s:0.5f}" + if hasattr(self, 'lbSlicePositionVal'): + self.lbSlicePositionVal.setText(pos_text) + except Exception: + pass + # Reflect availability of Extract action based on existence + try: + if hasattr(self, 'actionExtractSlice'): + self.actionExtractSlice.setEnabled(self._slice_points_exist()) + except Exception: + pass + except Exception: + pass + + def _ensure_data_loaded_or_warn(self) -> bool: + try: + if self.cloud_mesh is not None: + return True + except Exception: + pass + try: + QMessageBox.warning(self, 'No Data', 'Load data before adjusting the slice.') + except Exception: + pass + return False + + +# add main +if __name__ == '__main__': + try: + app = QApplication(sys.argv) + window = HKL3DSliceWindow() + window.show() + size_manager = SizeManager(app=app) + sys.exit(app.exec_()) + except KeyboardInterrupt: + sys.exit(0) diff --git a/viewer/hkl_3d_viewer.py b/viewer/hkl_3d_viewer.py index 83b8c85..0d6aeb7 100644 --- a/viewer/hkl_3d_viewer.py +++ b/viewer/hkl_3d_viewer.py @@ -1,77 +1,403 @@ -import sys -import argparse +import sys, pathlib +import os +import h5py +import time +import subprocess import numpy as np -import open3d as o3d -import matplotlib.pyplot as plt -# from pyqtgraph import colormap -from PyQt5.QtWidgets import QApplication, QPushButton, QMainWindow -#TODO: add axis, legend, and labels - -class HKL3DViewer(QMainWindow): - def __init__(self, qx, qy, qz, intensity): - super().__init__() - self.qx = qx - self.qy = qy - self.qz = qz - self.intensity = intensity - self.initUI() - - def initUI(self): - self.setWindowTitle("HKL Data Viewer") - 
self.setGeometry(100, 100, 300, 200) - - # Button to open Open3D visualization - btn = QPushButton("Show 3D Plot", self) - btn.setGeometry(80, 80, 140, 40) - btn.clicked.connect(self.show_3d_plot) - - def show_3d_plot(self) -> None: - # open_3d_plot(self.qx, self.qy, self.qz, self.intensity) - points = np.column_stack((self.qx, self.qy, self.qz)) +import os.path as osp +import pyqtgraph as pg +import pyvista as pyv +import pyvistaqt as pyvqt +from pyvistaqt import QtInteractor, BackgroundPlotter +from PyQt5 import uic +# from epics import caget +from PyQt5.QtCore import QTimer, QThread, pyqtSignal, Qt +from PyQt5.QtWidgets import QApplication, QMainWindow, QDialog, QFileDialog, QMessageBox +# Custom imported classes +# Add the parent directory to the path so the font_scaling.py file can be imported +sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) +from utils import PVAReader, HDF5Writer, SizeManager +from hkl_3d_slice_window import HKL3DSliceWindow + + +class ConfigDialog(QDialog): + + def __init__(self): + """ + Class that does initial setup for getting the pva prefix, collector address, + and the path to the json that stores the pvs that will be observed + + Attributes: + input_channel (str): Input channel for PVA. + config_path (str): Path to the ROI configuration file. + """ + super(ConfigDialog,self).__init__() + uic.loadUi('gui/pv_config.ui', self) + self.setWindowTitle('PV Config') + # initializing variables to pass to Image Viewer + self.input_channel = "" + self.config_path = "" + # class can be prefilled with text + self.init_ui() - # Normalize qz for color mapping - intensity_min, intensity_max = np.min(self.intensity), np.max(self.intensity) - norm_intensity = (self.intensity - intensity_min) / (intensity_max - intensity_min) + # Connecting signasl to + self.btn_clear.clicked.connect(self.clear_pv_setup) + self.btn_browse.clicked.connect(self.browse_file_dialog) + self.btn_accept_reject.accepted.connect(self.dialog_accepted) - # Apply a colormap - cmap = plt.get_cmap("jet") - colors = cmap(norm_intensity)[:, :3] # Extract RGB + def init_ui(self) -> None: + """ + Prefills text in the Line Editors for the user. + """ + self.le_input_channel.setText(self.le_input_channel.text()) + self.le_config.setText(self.le_config.text()) + def browse_file_dialog(self) -> None: + """ + Opens a file dialog to select the path to a TOML configuration file. + """ + self.pvs_path, _ = QFileDialog.getOpenFileName(self, 'Select TOML Config', 'pv_configs', '*.toml (*.toml)') - # Create Open3D point cloud - pcd = o3d.geometry.PointCloud() - pcd.points = o3d.utility.Vector3dVector(points) - pcd.colors = o3d.utility.Vector3dVector(colors) + self.le_config.setText(self.pvs_path) + + def clear_pv_setup(self) -> None: + """ + Clears line edit that tells image view where the config file is. + """ + self.le_config.clear() + def dialog_accepted(self) -> None: + """ + Handles the final step when the dialog's accept button is pressed. + Starts the HKLImageWindow process with filled information. 
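+        If a non-empty path does not point to an existing file, an error is
+        printed and a fresh ConfigDialog is shown so the user can re-enter
+        the configuration.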
+ """ + self.input_channel = self.le_input_channel.text() + self.config_path = self.le_config.text() + if osp.isfile(self.config_path) or (self.config_path == ''): + self.hkl_3d_viewer = HKLImageWindow(input_channel=self.input_channel, + file_path=self.config_path,) + else: + print(f'File Path {self.config_path} Doesn\'t Exitst') + #TODO: ADD ERROR Dialog rather than print message so message is clearer + self.new_dialog = ConfigDialog() + self.new_dialog.show() - # Show the Open3D plot - try: - axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0,0,0]) - o3d.visualization.draw_geometries([pcd, axes]) - # o3d.visualization.draw_geometries([pcd]) +class HKLImageWindow(QMainWindow): + images_plotted = pyqtSignal(bool) + + def __init__(self, input_channel='s6lambda1:Pva1:Image', file_path=''): + """ + Initializes the main window for real-time image visualization and manipulation. + + Args: + input_channel (str): The PVA input channel for the detector. + file_path (str): The file path for loading configuration. + """ + super(HKLImageWindow, self).__init__() + uic.loadUi('gui/hkl_viewer_window.ui', self) + self.setWindowTitle('HKL Viewer') + self.show() + + # Initializing Viewer variables + self.reader = None + self.image = None + self.call_id_plot = 0 + self.image_is_transposed = False + self._input_channel = input_channel + self.pv_prefix.setText(self._input_channel) + self._file_path = file_path + + # Initializing but not starting timers so they can be reached by different functions + self.timer_labels = QTimer() + self.file_writer_thread = QThread() + self.timer_labels.timeout.connect(self.update_labels) + + # HKL values + self.hkl_config = None + self.hkl_data = {} + self.qx = None + self.qy = None + self.qz = None + self.processes = {} + + # Adding widgets manually to have better control over them + pyv.set_plot_theme('dark') + self.plotter = QtInteractor(self) + self.viewer_layout.addWidget(self.plotter,1,1) + + # pyvista vars + self.actor = None + self.lut = None + self.cloud = None + self.min_intensity = 0.0 + self.max_intensity = 0.0 + self.min_opacity = 0.0 + self.max_opacity = 1.0 + self.plotter.add_axes(xlabel='H', ylabel='K', zlabel='L') + + # Connecting the signals to the code that will be executed + self.pv_prefix.returnPressed.connect(self.start_live_view_clicked) + self.pv_prefix.textChanged.connect(self.update_pv_prefix) + self.start_live_view.clicked.connect(self.start_live_view_clicked) + self.stop_live_view.clicked.connect(self.stop_live_view_clicked) + # self.plotting_frequency.valueChanged.connect(self.start_timers) + # self.log_image.clicked.connect(self.update_image) + self.sbox_min_intensity.editingFinished.connect(self.update_intensity) + self.sbox_max_intensity.editingFinished.connect(self.update_intensity) + self.sbox_min_opacity.editingFinished.connect(self.update_opacity) + self.sbox_max_opacity.editingFinished.connect(self.update_opacity) + self.btn_3d_slice_window.clicked.connect(self.open_3d_slice_window) + + def start_timers(self) -> None: + """ + Starts timers for updating labels and plotting at specified frequencies. + """ + self.timer_labels.start(int(1000/100)) + + def stop_timers(self) -> None: + """ + Stops the updating of main window labels and plots. + """ + self.timer_labels.stop() + + def start_live_view_clicked(self) -> None: + """ + Initializes the connections to the PVA channel using the provided Channel Name. + + This method ensures that any existing connections are cleared and re-initialized. 
+ Also starts monitoring the stats and adds ROIs to the viewer. + """ + try: + # A double check to make sure there isn't a connection already when starting + self.stop_timers() + self.plotter.clear() + if self.reader is None: + self.reader = PVAReader(input_channel=self._input_channel, + config_filepath=self._file_path, + viewer_type='rsm') + self.file_writer = HDF5Writer(self.reader.OUTPUT_FILE_LOCATION, self.reader) + self.file_writer.moveToThread(self.file_writer_thread) + else: + self.btn_save_h5.clicked.disconnect() + self.btn_plot_cache.clicked.disconnect() + self.file_writer.hdf5_writer_finished.disconnect() + if self.reader.channel.isMonitorActive(): + self.reader.stop_channel_monitor() + if self.file_writer_thread.isRunning(): + self.file_writer_thread.quit() + self.file_writer_thread.wait() + del self.reader + self.reader = PVAReader(input_channel=self._input_channel, + config_filepath=self._file_path, + viewer_type='rsm') + self.file_writer.pva_reader = self.reader + self.btn_save_h5.clicked.connect(self.save_caches_clicked) + self.btn_plot_cache.clicked.connect(self.update_image_from_button) + self.reader.reader_scan_complete.connect(self.update_image_from_scan) + #self.images_plotted.connect(self.trigger_save_caches) + #self.file_writer.hdf5_writer_finished.connect(self.on_writer_finished) + if self.reader.CACHING_MODE == 'scan': + self.file_writer_thread.start() except Exception as e: - print(f'Failed to perform visualization:{e}') - sys.exit(2) + print(f'Failed to Connect to {self._input_channel}: {e}') + del self.reader + self.reader = None + self.provider_name.setText('N/A') + self.is_connected.setText('Disconnected') + + if self.reader is not None: + # self.set_pixel_ordering() + self.reader.start_channel_monitor() + self.start_timers() + + def stop_live_view_clicked(self) -> None: + """ + Clears the connection for the PVA channel and stops all active monitors. -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Visualize Q-space data using Open3D.") - parser.add_argument("--qx-file", type=str, required=True, help="Path to NumPy file containing qx array.") - parser.add_argument("--qy-file", type=str, required=True, help="Path to NumPy file containing qy array.") - parser.add_argument("--qz-file", type=str, required=True, help="Path to NumPy file containing qz array.") - parser.add_argument("--intensity-file", type=str, required=True, help="Path to NumPy file containing Intensity array.") + This method also updates the UI to reflect the disconnected state. + """ + if self.reader is not None: + self.reader.stop_channel_monitor() + self.stop_timers() + self.provider_name.setText('N/A') + self.is_connected.setText('Disconnected') + + def trigger_save_caches(self, clear_caches:bool=True) -> None: + if not self.file_writer_thread.isRunning(): + self.file_writer_thread.start() + self.file_writer.save_caches_to_h5(clear_caches=clear_caches) + + def save_caches_clicked(self) -> None: + if not self.reader.channel.isMonitorActive(): + if not self.file_writer_thread.isRunning(): + self.file_writer_thread.start() + self.file_writer.save_caches_to_h5() + else: + QMessageBox.critical(None, + 'Error', + 'Stop Live View to Save Cache', + QMessageBox.Ok) + + def on_writer_finished(self, message) -> None: + print(message) + self.file_writer_thread.quit() + self.file_writer_thread.wait() + + # def freeze_image_checked(self) -> None: + # """ + # Toggles freezing/unfreezing of the plot based on the checked state + # without stopping the collection of PVA objects. 
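The reader/writer setup above relies on Qt's worker-object pattern: create a bare `QThread`, `moveToThread` the worker, and let queued signals cross the thread boundary, then `quit()` and `wait()` on teardown. A minimal sketch with an illustrative `Writer` worker (names are not from the repo):

```python
import sys
from PyQt5.QtCore import QObject, QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication

class Writer(QObject):
    finished = pyqtSignal(str)

    def save(self):
        # Runs in the worker thread once the object has been moved there.
        self.finished.emit('write complete')

if __name__ == '__main__':
    app = QApplication(sys.argv)
    thread = QThread()
    writer = Writer()
    writer.moveToThread(thread)
    writer.finished.connect(lambda msg: (print(msg), app.quit()))
    thread.started.connect(writer.save)  # slot executes inside the worker thread
    thread.start()
    app.exec_()
    thread.quit()
    thread.wait()
```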
+    #     """
+    #     if self.reader is not None:
+    #         if self.freeze_image.isChecked():
+    #             self.stop_timers()
+    #         else:
+    #             self.start_timers()
+
+    def update_pv_prefix(self) -> None:
+        """
+        Updates the input channel prefix based on the value entered in the prefix field.
+        """
+        self._input_channel = self.pv_prefix.text()
+
+    def update_labels(self) -> None:
+        """
+        Updates the UI labels with current connection and cached data.
+        """
+        if self.reader is not None:
+            provider_name = f"{self.reader.provider if self.reader.channel.isMonitorActive() else 'N/A'}"
+            is_connected = 'Connected' if self.reader.channel.isMonitorActive() else 'Disconnected'
+            self.provider_name.setText(provider_name)
+            self.is_connected.setText(is_connected)
+            self.missed_frames_val.setText(f'{self.reader.frames_missed:d}')
+            self.frames_received_val.setText(f'{self.reader.frames_received:d}')
+
+    def update_image_from_scan(self) -> None:
+        self.update_image(is_scan_signal=True)
+
+    def update_image_from_button(self) -> None:
+        self.update_image(is_scan_signal=False)
+
+    def update_image(self, is_scan_signal:bool=False) -> None:
+        """
+        Redraws plots based on the configured update rate.
+
+        Processes the image data according to main window settings, such as rotation
+        and log transformations. Also sets initial min/max pixel values in the UI.
+        """
+        if self.reader is not None:
+            self.call_id_plot += 1
+            if self.reader.cached_images is not None and self.reader.cached_qx is not None:
+                self.plotter.clear()
+                try:
+                    num_images = len(self.reader.cached_images)
+                    num_rsm = len(self.reader.cached_qx)
+                    if num_images != num_rsm:
+                        raise ValueError(f'Size of caches are uneven:\nimages:{num_images}\nqxyz: {num_rsm}')
+                    # concatenate all cached data
+                    flat_intensity = np.concatenate(self.reader.cached_images, dtype=np.float32)
+                    qx = np.concatenate(self.reader.cached_qx, dtype=np.float32)
+                    qy = np.concatenate(self.reader.cached_qy, dtype=np.float32)
+                    qz = np.concatenate(self.reader.cached_qz, dtype=np.float32)
+
+                    points = np.column_stack((
+                        qx, qy, qz
+                    ))
+                except Exception as e:
+                    print(f'[HKL Viewer] Failed to concatenate caches: {e}')
+                    return  # the arrays used below would be undefined
+
+                try:
+                    if is_scan_signal:
+                        clear_caches = True
+                        self.images_plotted.emit(clear_caches)
+
+                    self.min_intensity = np.min(flat_intensity)
+                    self.max_intensity = np.max(flat_intensity)
+                    self.sbox_max_intensity.setValue(self.max_intensity)
+
+                    self.cloud = pyv.PolyData(points)
+                    self.cloud['intensity'] = flat_intensity
+
+                    self.lut = pyv.LookupTable(cmap='viridis')
+                    self.lut.below_range_color = 'black'
+                    self.lut.above_range_color = 'black'
+                    self.lut.below_range_opacity = 0
+                    self.lut.above_range_opacity = 0
+                    self.update_opacity()
+                    self.update_intensity()
+
+                    self.actor = self.plotter.add_mesh(
+                        self.cloud,
+                        scalars='intensity',
+                        cmap=self.lut,
+                        point_size=3
+                    )
+
+                    self.plotter.show_bounds(xtitle='H Axis', ytitle='K Axis', ztitle='L Axis')
+                except Exception as e:
+                    print(f"[HKL Viewer] Failed to update 3D plot: {e}")
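A compact, off-screen sketch of the `PolyData` + `LookupTable` pipeline that `update_image` above builds, using random stand-in data instead of cached PVA frames; it assumes a recent pyvista with the range color/opacity properties used above:

```python
import numpy as np
import pyvista as pyv

rng = np.random.default_rng(0)
points = rng.uniform(-1.0, 1.0, size=(5000, 3)).astype(np.float32)
intensity = rng.uniform(0.0, 100.0, size=5000).astype(np.float32)

cloud = pyv.PolyData(points)
cloud['intensity'] = intensity

# Values outside the scalar range are drawn black and fully transparent,
# which is what makes the intensity spinboxes act as a live filter.
lut = pyv.LookupTable(cmap='viridis')
lut.below_range_color = 'black'
lut.above_range_color = 'black'
lut.below_range_opacity = 0
lut.above_range_opacity = 0
lut.scalar_range = (20.0, 80.0)

plotter = pyv.Plotter(off_screen=True)
plotter.add_mesh(cloud, scalars='intensity', cmap=lut, point_size=3)
plotter.show_bounds(xtitle='H Axis', ytitle='K Axis', ztitle='L Axis')
plotter.screenshot('hkl_cloud_preview.png')  # render without opening a window
```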
+    def update_opacity(self) -> None:
+        """
+        Updates the min/max opacity levels in the HKL Viewer based on UI settings.
+        """
+        self.min_opacity = self.sbox_min_opacity.value()
+        self.max_opacity = self.sbox_max_opacity.value()
+        if self.min_opacity > self.max_opacity:
+            self.min_opacity, self.max_opacity = self.max_opacity, self.min_opacity
+            self.sbox_min_opacity.setValue(self.min_opacity)
+            self.sbox_max_opacity.setValue(self.max_opacity)
+        if self.lut is not None:
+            self.lut.apply_opacity([self.min_opacity, self.max_opacity])
+
+    def update_intensity(self) -> None:
+        """
+        Updates the min/max intensity levels in the HKL Viewer based on UI settings.
+        """
+        self.min_intensity = self.sbox_min_intensity.value()
+        self.max_intensity = self.sbox_max_intensity.value()
+        if self.min_intensity > self.max_intensity:
+            self.min_intensity, self.max_intensity = self.max_intensity, self.min_intensity
+            self.sbox_min_intensity.setValue(self.min_intensity)
+            self.sbox_max_intensity.setValue(self.max_intensity)
+        if self.lut is not None:
+            self.lut.scalar_range = (self.min_intensity, self.max_intensity)
+        if self.actor is not None:
+            self.actor.mapper.scalar_range = (self.min_intensity, self.max_intensity)
+
+    def closeEvent(self, event):
+        """
+        Custom close event to clean up resources, including stat dialogs.
+
+        Args:
+            event (QCloseEvent): The close event triggered when the main window is closed.
+        """
+        if self.file_writer_thread.isRunning():
+            self.file_writer_thread.quit()
+            self.file_writer_thread.wait()
+        super(HKLImageWindow, self).closeEvent(event)
+
+    def open_3d_slice_window(self) -> None:
+        try:
+            self.slice_window = HKL3DSliceWindow(self)
+            self.slice_window.show()
+        except Exception as e:
+            import traceback
+            with open('error_output2.txt', 'w') as f:
+                f.write(f"Traceback:\n{traceback.format_exc()}\n\nError:\n{str(e)}")
-
-    args = parser.parse_args()
+
+if __name__ == '__main__':
     try:
-        qx = np.load(args.qx_file)
-        qy = np.load(args.qy_file)
-        qz = np.load(args.qz_file)
-        intensity = np.load(args.intensity_file)
-    except Exception as e:
-        print(f"Failed to Load Numpy File: {e}")
-
-    app = QApplication(sys.argv)
-    window = HKL3DViewer(qx, qy, qz, intensity)
-    window.show()
-    sys.exit(app.exec_())
+        app = QApplication(sys.argv)
+        window = ConfigDialog()
+        window.show()
+        size_manager = SizeManager(app=app)
+        sys.exit(app.exec_())
+    except KeyboardInterrupt:
+        sys.exit(0)
\ No newline at end of file
diff --git a/viewer/hkl_controls_dialog.py b/viewer/hkl_controls_dialog.py
new file mode 100644
index 0000000..28beb9e
--- /dev/null
+++ b/viewer/hkl_controls_dialog.py
@@ -0,0 +1,205 @@
+from PyQt5 import uic
+from PyQt5.QtWidgets import QDialog
+
+
+class HKLControlsDialog(QDialog):
+    """
+    Modeless dialog that encapsulates Slice and Camera controls for the HKL 3D viewer.
+    Wires UI signals to methods on the main HKL3DSliceWindow instance and updates small state
+    variables (e.g., _zoom_step, _cam_pos_selection) without the main window directly reading
+    dialog widgets.
+ """ + def __init__(self, main): + super().__init__(parent=main) + self.main = main + uic.loadUi('gui/controls/hkl_controls_dialog.ui', self) + # Initialize slice orientation and custom normal from main + try: + if hasattr(self.main, '_slice_orientation_selection') and self.main._slice_orientation_selection: + self.cbSliceOrientation.setCurrentText(str(self.main._slice_orientation_selection)) + except Exception: + pass + try: + cn = getattr(self.main, '_custom_normal', [0.0, 0.0, 1.0]) + self.sbNormH.setValue(float(cn[0])) + self.sbNormK.setValue(float(cn[1])) + self.sbNormL.setValue(float(cn[2])) + except Exception: + pass + + # Camera controls wiring + try: + self.btnZoomIn.clicked.connect(self.main.zoom_in) + except Exception: + pass + try: + self.btnZoomOut.clicked.connect(self.main.zoom_out) + except Exception: + pass + try: + self.btnResetCamera.clicked.connect(self.main.reset_camera) + except Exception: + pass + try: + # Keep local state in main; avoid main reading this widget directly + self.sbZoomStep.valueChanged.connect(self._on_zoom_step_changed) + # Initialize spinbox with main's current zoom step if available + if hasattr(self.main, '_zoom_step'): + try: + self.sbZoomStep.setValue(float(self.main._zoom_step)) + except Exception: + pass + except Exception: + pass + try: + # Camera preset selection: update main's state; execution triggered by Set button + self.cbSetCamPos.currentTextChanged.connect(self._on_cam_pos_changed) + self.btnSetCamPos.clicked.connect(self.main.set_camera_position) + except Exception: + pass + try: + self.btnHKView.clicked.connect(lambda: self.main._apply_cam_preset_button('HK(xy)')) + except Exception: + pass + try: + self.btnKLView.clicked.connect(lambda: self.main._apply_cam_preset_button('KL(yz)')) + except Exception: + pass + try: + self.btnHLView.clicked.connect(lambda: self.main._apply_cam_preset_button('HL(xz)')) + except Exception: + pass + try: + self.btnViewSliceNormal.clicked.connect(self.main.view_slice_normal) + except Exception: + pass + + # Slice controls wiring + try: + self.sbSliceTranslateStep.valueChanged.connect(self.main._on_translate_step_changed) + except Exception: + pass + try: + self.sbSliceRotateStep.valueChanged.connect(self.main._on_rotate_step_changed) + except Exception: + pass + try: + self.cbSliceOrientation.currentIndexChanged.connect(self._on_slice_orientation_changed) + except Exception: + pass + try: + self.sbNormH.editingFinished.connect(self._on_custom_normal_spinboxes_changed) + self.sbNormK.editingFinished.connect(self._on_custom_normal_spinboxes_changed) + self.sbNormL.editingFinished.connect(self._on_custom_normal_spinboxes_changed) + except Exception: + pass + + # Translate buttons + try: + self.btnSliceUpNormal.clicked.connect(lambda: self.main.nudge_along_normal(+1)) + except Exception: + pass + try: + self.btnSliceDownNormal.clicked.connect(lambda: self.main.nudge_along_normal(-1)) + except Exception: + pass + try: + self.btnSlicePosH.clicked.connect(lambda: self.main.nudge_along_axis('H', +1)) + self.btnSliceNegH.clicked.connect(lambda: self.main.nudge_along_axis('H', -1)) + except Exception: + pass + try: + self.btnSlicePosK.clicked.connect(lambda: self.main.nudge_along_axis('K', +1)) + self.btnSliceNegK.clicked.connect(lambda: self.main.nudge_along_axis('K', -1)) + except Exception: + pass + try: + self.btnSlicePosL.clicked.connect(lambda: self.main.nudge_along_axis('L', +1)) + self.btnSliceNegL.clicked.connect(lambda: self.main.nudge_along_axis('L', -1)) + except Exception: + pass + + # Rotate buttons 
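The rotate wiring that follows uses the same defensive idiom as the blocks above: each `connect` is isolated in `try/except` so one missing widget in the .ui file cannot break the rest, and the step size is read at click time via `getattr` with a default. A standalone sketch of the idiom, with illustrative widget and class names:

```python
import sys
from PyQt5.QtWidgets import QApplication, QDialog, QPushButton, QVBoxLayout

class Main:
    _slice_rotate_step_deg = 2.5  # illustrative default

    def rotate_about_axis(self, axis: str, deg: float):
        print(f'rotate {axis} by {deg:+.1f} deg')

class MiniControls(QDialog):
    def __init__(self, main: Main):
        super().__init__()
        self.main = main
        layout = QVBoxLayout(self)
        self.btnRotPlusH = QPushButton('Rot +H', self)
        layout.addWidget(self.btnRotPlusH)
        try:
            # Step size is resolved when the button fires, not when it is wired.
            self.btnRotPlusH.clicked.connect(
                lambda: self.main.rotate_about_axis(
                    'H', +float(getattr(self.main, '_slice_rotate_step_deg', 1.0))))
        except Exception:
            pass  # missing widgets degrade gracefully instead of raising

if __name__ == '__main__':
    app = QApplication(sys.argv)
    dlg = MiniControls(Main())
    dlg.show()
    app.exec_()
```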
+ try: + self.btnRotPlusH.clicked.connect(lambda: self.main.rotate_about_axis('H', +float(getattr(self.main, '_slice_rotate_step_deg', 1.0)))) + self.btnRotMinusH.clicked.connect(lambda: self.main.rotate_about_axis('H', -float(getattr(self.main, '_slice_rotate_step_deg', 1.0)))) + except Exception: + pass + try: + self.btnRotPlusK.clicked.connect(lambda: self.main.rotate_about_axis('K', +float(getattr(self.main, '_slice_rotate_step_deg', 1.0)))) + self.btnRotMinusK.clicked.connect(lambda: self.main.rotate_about_axis('K', -float(getattr(self.main, '_slice_rotate_step_deg', 1.0)))) + except Exception: + pass + try: + self.btnRotPlusL.clicked.connect(lambda: self.main.rotate_about_axis('L', +float(getattr(self.main, '_slice_rotate_step_deg', 1.0)))) + self.btnRotMinusL.clicked.connect(lambda: self.main.rotate_about_axis('L', -float(getattr(self.main, '_slice_rotate_step_deg', 1.0)))) + except Exception: + pass + try: + self.btnResetSlice.clicked.connect(self.main._on_reset_slice) + except Exception: + pass + + # Dialog properties + try: + # Modeless by default; caller decides modality if needed + self.setModal(False) + except Exception: + pass + + # ---------- Dialog-side slots updating main window state ---------- + def _on_zoom_step_changed(self, val: float): + try: + self.main._zoom_step = float(val) + except Exception: + self.main._zoom_step = 1.5 + + def _on_cam_pos_changed(self, text: str): + try: + self.main._cam_pos_selection = str(text) + except Exception: + self.main._cam_pos_selection = None + + def _on_slice_orientation_changed(self, idx: int): + # Update main state with current orientation selection then delegate + try: + text = self.cbSliceOrientation.currentText() + except Exception: + text = 'HK(xy)' + try: + self.main._slice_orientation_selection = text + except Exception: + pass + try: + self.main._on_orientation_changed(idx) + except Exception: + pass + + def _on_custom_normal_spinboxes_changed(self): + # Update main state with current custom normal then delegate + try: + h = float(self.sbNormH.value()) + k = float(self.sbNormK.value()) + l = float(self.sbNormL.value()) + self.main._custom_normal = [h, k, l] + except Exception: + self.main._custom_normal = [0.0, 0.0, 1.0] + try: + self.main._on_custom_normal_changed() + except Exception: + pass + + # ---------- Focus helpers ---------- + def focus_camera_section(self): + try: + # Focus movements group; optionally scroll if a scroll area is added later + self.gbCamMovements.setFocus() + except Exception: + pass + + def focus_slice_section(self): + try: + # Focus steps group for convenience + self.gbSteps.setFocus() + except Exception: + pass diff --git a/viewer/hkl_slice_2d_view.py b/viewer/hkl_slice_2d_view.py new file mode 100644 index 0000000..0bce1ea --- /dev/null +++ b/viewer/hkl_slice_2d_view.py @@ -0,0 +1,481 @@ +import numpy as np +from typing import Optional, Tuple + +from PyQt5 import uic +from PyQt5.QtCore import QTimer +from PyQt5.QtWidgets import QWidget, QVBoxLayout +import pyqtgraph as pg + + +class HKLSlice2DView(QWidget): + """ + Lightweight 2D slice view that mirrors the current slice from the parent HKL3DSliceWindow. + - No file I/O, no extra controls. + - Inherits min/max intensity and colormap directly from the parent. + - Updates are throttled with a QTimer to avoid re-rasterizing on every drag event. 
+ """ + + def __init__(self, parent): + super().__init__(parent=parent) + self.parent = parent + # Load .ui and setup host layout for embedded 2D plot + uic.loadUi('gui/hkl_slice_2d_view.ui', self) + + # Plot with ImageView + PlotItem for axis labeling + self.plot_item = pg.PlotItem() + self.image_view = pg.ImageView(view=self.plot_item) + # Axis labels via PlotItem + self.plot_item.setLabel('bottom', 'U') + self.plot_item.setLabel('left', 'V') + # Optional: lock aspect for square pixels + try: + self.image_view.view.setAspectLocked(True) + except Exception: + pass + try: + self.layoutPlotHost.addWidget(self.image_view) + except Exception: + # Fallback: attach directly if layout not found + fallback_layout = QVBoxLayout(self) + fallback_layout.setContentsMargins(6, 6, 6, 6) + fallback_layout.addWidget(self.image_view) + # Add a lightweight text overlay to indicate slice orientation and value of orthogonal axis + try: + self._slice_info_text = pg.TextItem("", color="w", anchor=(0, 1)) + self.plot_item.addItem(self._slice_info_text) + except Exception: + self._slice_info_text = None + + # Pending update queue + throttle timer + self._pending = None # type: Optional[Tuple[object, np.ndarray, np.ndarray]] + self._timer = QTimer(self) + self._timer.setInterval(100) # ~10 fps coalesced updates + self._timer.timeout.connect(self._flush_pending) + + # Store initial parent settings for consistency + self._last_synced_levels = None + self._last_synced_colormap = None + + # Initial sync of display settings from parent + try: + self.sync_levels() + except Exception: + pass + try: + self.sync_colormap() + except Exception: + pass + + def schedule_update(self, slice_mesh, normal: np.ndarray, origin: np.ndarray) -> None: + """ + Called by the parent after updating the 3D slice. Stores latest data and starts the coalescing timer. + """ + try: + # Store latest references; slice_mesh is a PyVista dataset (PolyData) + self._pending = (slice_mesh, np.array(normal, dtype=float), np.array(origin, dtype=float)) + if not self._timer.isActive(): + self._timer.start() + except Exception: + # Silently ignore to avoid impacting parent + pass + + def _flush_pending(self) -> None: + """ + Timer slot: perform a single rasterization from the most recent pending slice and update the ImageItem, + with axes labeled to HK/KL/HL and plot ranges set to physical coordinates. 
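A self-contained sketch of the coalescing pattern that `schedule_update` (above) and `_flush_pending` (continuing below) implement: rapid calls only overwrite a pending payload, and a single timer tick does the expensive work, capping it at roughly 10 fps:

```python
import sys
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication

class Coalescer:
    def __init__(self):
        self._pending = None
        self._timer = QTimer()
        self._timer.setInterval(100)  # at most one flush per 100 ms
        self._timer.timeout.connect(self._flush)

    def schedule(self, payload):
        self._pending = payload       # newer payloads overwrite older ones
        if not self._timer.isActive():
            self._timer.start()

    def _flush(self):
        self._timer.stop()
        if self._pending is None:
            return
        payload, self._pending = self._pending, None
        print(f'rendered: {payload}')  # stands in for the rasterization step

if __name__ == '__main__':
    app = QApplication(sys.argv)
    c = Coalescer()
    for i in range(100):               # burst of updates -> a single render
        c.schedule(i)
    QTimer.singleShot(300, app.quit)
    sys.exit(app.exec_())
```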
+ """ + try: + self._timer.stop() + if not self._pending: + return + slice_mesh, normal, origin = self._pending + self._pending = None + + # Extract points and intensities from the PyVista slice mesh + try: + pts = np.asarray(slice_mesh.points, dtype=float) # (N,3) + except Exception: + pts = np.empty((0, 3), dtype=float) + try: + vals = np.asarray(slice_mesh["intensity"], dtype=float).reshape(-1) + except Exception: + vals = np.zeros((len(pts),), dtype=float) + + if pts.size == 0 or vals.size == 0 or pts.shape[0] != vals.shape[0]: + # Nothing to render + return + + # Target raster shape: prefer parent's curr_shape, then orig_shape, else fallback + target_shape = self._get_target_shape() + H = max(int(target_shape[0]), 1) + W = max(int(target_shape[1]), 1) + + # Rasterize to 2D image + axis ranges/orientation + result = self._rasterize_to_image(pts, vals, normal, origin, H, W) + if result is None: + return + image, U_min, U_max, V_min, V_max, orientation, orth_label, orth_value = result + if image is None or (hasattr(image, "size") and image.size == 0): + return + + # Update the image content + try: + self.image_view.setImage( + image.astype(np.float32), + autoLevels=False, + autoRange=False, + autoHistogramRange=False + ) + except Exception: + # Fallback to underlying ImageItem + try: + self.image_view.imageItem.setImage(image.astype(np.float32), autoLevels=False) + except Exception: + pass + + # Apply item transform to map pixels to physical HKL coordinates + try: + it = self.image_view.imageItem + try: + it.resetTransform() + except Exception: + try: + it.setTransform(pg.QtGui.QTransform()) # identity + except Exception: + pass + sx = float(U_max - U_min) / float(W if W != 0 else 1) + sy = float(V_max - V_min) / float(H if H != 0 else 1) + if not np.isfinite(sx) or sx == 0.0: + sx = 1.0 + if not np.isfinite(sy) or sy == 0.0: + sy = 1.0 + try: + it.scale(sx, sy) + it.setPos(U_min, V_min) + except Exception: + pass + except Exception: + pass + + # Set axis ranges and labels based on orientation + try: + self.plot_item.setXRange(U_min, U_max, padding=0) + self.plot_item.setYRange(V_min, V_max, padding=0) + except Exception: + pass + try: + if orientation == "HK": + self.plot_item.setLabel('bottom', 'H') + self.plot_item.setLabel('left', 'K') + elif orientation == "KL": + self.plot_item.setLabel('bottom', 'K') + self.plot_item.setLabel('left', 'L') + elif orientation == "HL": + self.plot_item.setLabel('bottom', 'H') + self.plot_item.setLabel('left', 'L') + else: + self.plot_item.setLabel('bottom', 'U') + self.plot_item.setLabel('left', 'V') + except Exception: + pass + + # Update slice info text (e.g., "HK plane (L = 1.23)") + try: + if getattr(self, "_slice_info_text", None): + txt = str(orientation) + if txt and txt != "Custom": + txt += " plane" + if orth_label is not None and orth_value is not None and np.isfinite(orth_value): + txt += f" ({orth_label} = {orth_value:.2f})" + self._slice_info_text.setText(txt) + try: + # Place near top-left of current view + self._slice_info_text.setPos(U_min, V_max) + except Exception: + pass + except Exception: + pass + + # Inherit levels and colormap + self.sync_levels() + self.sync_colormap() + except Exception: + # Keep errors contained to avoid breaking parent interactions + pass + + def sync_levels(self) -> None: + """ + Inherit min/max intensity levels from the parent and apply them to the ImageItem. + Only updates if levels have changed to avoid unnecessary operations. 
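The transform applied above maps raster pixels to physical HKL coordinates with per-axis scales `sx = (U_max - U_min) / W` and `sy = (V_max - V_min) / H`, anchored at `(U_min, V_min)`. A quick numeric check of that arithmetic:

```python
import numpy as np

W, H = 4, 2                  # raster size (columns, rows)
U_min, U_max = -1.0, 1.0     # physical extents along the slice axes
V_min, V_max = 0.0, 0.5

sx = (U_max - U_min) / W     # physical width of one pixel
sy = (V_max - V_min) / H     # physical height of one pixel

cols = np.arange(W) + 0.5    # pixel-center coordinates
rows = np.arange(H) + 0.5
U = U_min + cols * sx        # -> [-0.75, -0.25, 0.25, 0.75]
V = V_min + rows * sy        # -> [0.125, 0.375]
print(U, V)
```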
+ """ + try: + vmin = float(self.parent.sbMinIntensity.value()) + vmax = float(self.parent.sbMaxIntensity.value()) + if vmin > vmax: + vmin, vmax = vmax, vmin + + # Check if levels have changed + current_levels = (vmin, vmax) + if self._last_synced_levels != current_levels: + try: + # ImageView supports setLevels(min, max) + self.image_view.setLevels(vmin, vmax) + except Exception: + try: + self.image_view.imageItem.setLevels((vmin, vmax)) + except Exception: + pass + self._last_synced_levels = current_levels + except Exception: + pass + + def sync_colormap(self) -> None: + """ + Inherit the current colormap from the parent and apply it to the ImageItem. + Only updates if colormap has changed to avoid unnecessary operations. + """ + try: + cmap_name = str(self.parent.cbColorMapSelect.currentText()) + except Exception: + cmap_name = "viridis" + + # Check if colormap has changed + if self._last_synced_colormap == cmap_name: + return + + lut = None + # Try pyqtgraph ColorMap + try: + if hasattr(pg, "colormap") and hasattr(pg.colormap, "get"): + try: + cmap = pg.colormap.get(cmap_name) + except Exception: + # Some names may be in matplotlib but not in pg + cmap = None + if cmap is not None: + lut = cmap.getLookupTable(nPts=256) + except Exception: + lut = None + + # Fallback via matplotlib if needed + if lut is None: + try: + import matplotlib.cm as cm + mpl_cmap = cm.get_cmap(cmap_name) + # Build LUT as uint8 Nx3 + xs = np.linspace(0.0, 1.0, 256, dtype=float) + colors = mpl_cmap(xs, bytes=True) # returns Nx4 uint8 + lut = colors[:, :3] + except Exception: + # Last resort: grayscale + xs = (np.linspace(0, 255, 256)).astype(np.uint8) + lut = np.column_stack([xs, xs, xs]) + + try: + self.image_view.imageItem.setLookupTable(lut) + self._last_synced_colormap = cmap_name + except Exception: + pass + + def sync_all_settings(self) -> None: + """ + Synchronize all rendering settings from parent (levels, colormap, etc.). + Called when the 2D view is first opened or when major changes occur. + """ + try: + # Force sync by clearing cached values + self._last_synced_levels = None + self._last_synced_colormap = None + + # Sync all settings + self.sync_levels() + self.sync_colormap() + + # Apply any reduction factor settings if available + try: + if hasattr(self.parent, '_applied_reduction_factor'): + # The 2D view inherits the same reduction as applied to the 3D view + pass # Already handled through target shape + except Exception: + pass + + except Exception: + pass + + def _get_target_shape(self) -> Tuple[int, int]: + """ + Determine target (H, W) for the raster image based on parent's known shapes. + Defaults to 512x512. + """ + # Prefer current shape if valid + try: + cs = getattr(self.parent, "curr_shape", None) + if isinstance(cs, (tuple, list)) and len(cs) == 2 and int(cs[0]) > 0 and int(cs[1]) > 0: + return int(cs[0]), int(cs[1]) + except Exception: + pass + # Fallback to original shape + try: + os_ = getattr(self.parent, "orig_shape", None) + if isinstance(os_, (tuple, list)) and len(os_) == 2 and int(os_[0]) > 0 and int(os_[1]) > 0: + return int(os_[0]), int(os_[1]) + except Exception: + pass + # Default + return (512, 512) + + def _infer_orientation_and_axes(self, normal: np.ndarray) -> Tuple[str, Optional[Tuple[int, int]], Optional[str]]: + """ + Infer slice orientation from the plane normal. + Returns (orientation, (u_idx, v_idx) for axis-aligned mapping or None, orth_label). + Orientation is one of 'HK', 'KL', 'HL', or 'Custom'. + u_idx/v_idx map to columns of pts (0:H, 1:K, 2:L). 
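`sync_colormap` above builds a 256-entry lookup table with a pyqtgraph, then matplotlib, then grayscale fallback chain; condensed into a standalone helper (the function name `build_lut` is mine, not the repo's):

```python
import numpy as np

def build_lut(cmap_name: str = 'viridis') -> np.ndarray:
    """Return an Nx3 uint8 lookup table for the named colormap."""
    try:
        import pyqtgraph as pg
        cmap = pg.colormap.get(cmap_name)
        if cmap is not None:
            return cmap.getLookupTable(nPts=256)
    except Exception:
        pass
    try:
        import matplotlib.cm as cm
        colors = cm.get_cmap(cmap_name)(np.linspace(0.0, 1.0, 256), bytes=True)
        return colors[:, :3]  # drop alpha, keep Nx3 uint8
    except Exception:
        # Last resort: grayscale ramp, as in the code above.
        xs = np.linspace(0, 255, 256).astype(np.uint8)
        return np.column_stack([xs, xs, xs])

print(build_lut().shape)  # (256, 3)
```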
+ orth_label is the axis perpendicular to the plane ('L' for HK, 'H' for KL, 'K' for HL). + """ + try: + n = np.array(normal, dtype=float) + n_norm = float(np.linalg.norm(n)) + if not np.isfinite(n_norm) or n_norm <= 0.0: + n = np.array([0.0, 0.0, 1.0], dtype=float) + else: + n = n / n_norm + X = np.array([1.0, 0.0, 0.0], dtype=float) # H + Y = np.array([0.0, 1.0, 0.0], dtype=float) # K + Z = np.array([0.0, 0.0, 1.0], dtype=float) # L + tol = 0.95 + dX = abs(float(np.dot(n, X))) + dY = abs(float(np.dot(n, Y))) + dZ = abs(float(np.dot(n, Z))) + if dZ >= tol: + # Normal ~ L → HK plane + return "HK", (0, 1), "L" + if dX >= tol: + # Normal ~ H → KL plane + return "KL", (1, 2), "H" + if dY >= tol: + # Normal ~ K → HL plane + return "HL", (0, 2), "K" + return "Custom", None, None + except Exception: + return "Custom", None, None + + def _rasterize_to_image( + self, + pts: np.ndarray, + vals: np.ndarray, + normal: np.ndarray, + origin: np.ndarray, + H: int, + W: int, + ) -> Optional[Tuple[np.ndarray, float, float, float, float, str, Optional[str], Optional[float]]]: + """ + Rasterize the slice to an HxW image and compute physical axis ranges and orientation. + Returns a tuple: (image, U_min, U_max, V_min, V_max, orientation, orth_label, orth_value) + - orientation in {'HK','KL','HL','Custom'} + - U/V correspond to physical axes when orientation is axis-aligned; otherwise derived basis projection. + - orth_label/orth_value represent the axis perpendicular to the slice plane (e.g., 'L' and origin[2] for HK). + """ + try: + n = np.array(normal, dtype=float) + o = np.array(origin, dtype=float) + + # Normalize normal + n_norm = float(np.linalg.norm(n)) + if not np.isfinite(n_norm) or n_norm <= 0.0: + n = np.array([0.0, 0.0, 1.0], dtype=float) + else: + n = n / n_norm + + orientation, uv_idxs, orth_label = self._infer_orientation_and_axes(n) + + if uv_idxs is not None: + # Axis-aligned planes: use absolute HKL coordinates directly + u_idx, v_idx = uv_idxs + U = pts[:, u_idx].astype(float) + V = pts[:, v_idx].astype(float) + U_min, U_max = float(np.min(U)), float(np.max(U)) + V_min, V_max = float(np.min(V)), float(np.max(V)) + # Handle degenerate ranges + if (not np.isfinite(U_min)) or (not np.isfinite(U_max)) or (U_max == U_min): + U_min, U_max = -0.5, 0.5 + if (not np.isfinite(V_min)) or (not np.isfinite(V_max)) or (V_max == V_min): + V_min, V_max = -0.5, 0.5 + # Weighted histogram (average) + sum_img, _, _ = np.histogram2d(V, U, bins=[H, W], range=[[V_min, V_max], [U_min, U_max]], weights=vals) + cnt_img, _, _ = np.histogram2d(V, U, bins=[H, W], range=[[V_min, V_max], [U_min, U_max]]) + with np.errstate(invalid="ignore", divide="ignore"): + img = np.zeros_like(sum_img, dtype=np.float32) + nz = cnt_img > 0 + img[nz] = (sum_img[nz] / cnt_img[nz]).astype(np.float32) + img[~nz] = 0.0 + # Orthogonal axis value from origin + orth_value = None + try: + if orth_label == "L": + orth_value = float(o[2]) + elif orth_label == "H": + orth_value = float(o[0]) + elif orth_label == "K": + orth_value = float(o[1]) + except Exception: + orth_value = None + return img, U_min, U_max, V_min, V_max, orientation, orth_label, orth_value + + # Custom orientation: fall back to in-plane basis projection + # Choose a reference axis not parallel to n to make in-plane basis + world_axes = [ + np.array([1.0, 0.0, 0.0], dtype=float), + np.array([0.0, 1.0, 0.0], dtype=float), + np.array([0.0, 0.0, 1.0], dtype=float), + ] + ref = world_axes[0] + for ax in world_axes: + if abs(float(np.dot(ax, n))) < 0.9: + ref = ax + break + u = 
np.cross(n, ref) + u_norm = float(np.linalg.norm(u)) + if not np.isfinite(u_norm) or u_norm <= 0.0: + ref = np.array([0.0, 1.0, 0.0], dtype=float) + u = np.cross(n, ref) + u_norm = float(np.linalg.norm(u)) + if not np.isfinite(u_norm) or u_norm <= 0.0: + u = np.array([1.0, 0.0, 0.0], dtype=float) + u_norm = 1.0 + u = u / u_norm + v = np.cross(n, u) + v_norm = float(np.linalg.norm(v)) + if not np.isfinite(v_norm) or v_norm <= 0.0: + v = np.array([0.0, 1.0, 0.0], dtype=float) + + # Project points into plane coordinates (origin-relative for custom) + rel = pts - o[None, :] + U = rel.dot(u) # shape (N,) + V = rel.dot(v) # shape (N,) + + # Handle degenerate ranges + U_min, U_max = float(np.min(U)), float(np.max(U)) + V_min, V_max = float(np.min(V)), float(np.max(V)) + if not np.isfinite(U_min) or not np.isfinite(U_max) or (U_max == U_min): + U_min, U_max = -0.5, 0.5 + if not np.isfinite(V_min) or not np.isfinite(V_max) or (V_max == V_min): + V_min, V_max = -0.5, 0.5 + + # Histogram to image + sum_img, _, _ = np.histogram2d(V, U, bins=[H, W], range=[[V_min, V_max], [U_min, U_max]], weights=vals) + cnt_img, _, _ = np.histogram2d(V, U, bins=[H, W], range=[[V_min, V_max], [U_min, U_max]]) + with np.errstate(invalid="ignore", divide="ignore"): + img = np.zeros_like(sum_img, dtype=np.float32) + nz = cnt_img > 0 + img[nz] = (sum_img[nz] / cnt_img[nz]).astype(np.float32) + img[~nz] = 0.0 + + # Orthogonal scalar position for custom + try: + orth_value = float(np.dot(n, o)) + except Exception: + orth_value = None + + return img, U_min, U_max, V_min, V_max, "Custom", None, orth_value + except Exception: + return None diff --git a/viewer/launcher.py b/viewer/launcher.py new file mode 100644 index 0000000..24a2f18 --- /dev/null +++ b/viewer/launcher.py @@ -0,0 +1,360 @@ +import sys +import os, subprocess, sys +from pathlib import Path +from PyQt5 import uic +from PyQt5.QtWidgets import QApplication, QDialog, QMessageBox, QLabel, QPushButton, QHBoxLayout +from PyQt5.QtCore import QTimer, Qt +from viewer.views_registry.registry import VIEWS + + +class LauncherDialog(QDialog): + def __init__(self): + super(LauncherDialog, self).__init__() + uic.loadUi('gui/dashpva.ui', self) + self.processes = {} + self._timer = QTimer(self) + self._timer.setInterval(500) + self._timer.timeout.connect(self._poll_processes) + self._timer.start() + + # Insert dynamic "Views" section (from registry) just before Post Analysis Tools + try: + self._insert_views_section() + except Exception: + # Robust to UI changes; if insertion fails, skip silently + pass + + # Insert a Tools section with utility buttons (e.g., Metadata Converter) at the bottom + try: + self._insert_utils_section() + except Exception: + # Fail silently if layout changes; section is optional + pass + + + # Wire buttons to launchers + if hasattr(self, 'btn_hkl3d_viewer'): + self.btn_hkl3d_viewer.clicked.connect( + lambda: self.launch( + 'hkl3d_viewer', + [sys.executable, 'viewer/hkl_3d_viewer.py'], + self.btn_hkl3d_viewer, + 'HKL 3D Viewer — Running…' + ) + ) + if hasattr(self, 'btn_hkl3d_slicer'): + self.btn_hkl3d_slicer.clicked.connect( + lambda: self.launch( + 'hkl3d_slicer', + [sys.executable, 'viewer/hkl_3d_slice_window.py'], + self.btn_hkl3d_slicer, + 'HKL 3D Slicer — Running…' + ) + ) + if hasattr(self, 'btn_area_detector'): + self.btn_area_detector.clicked.connect( + lambda: self.launch( + 'area_detector', + [sys.executable, 'viewer/area_det_viewer.py'], + self.btn_area_detector, + 'Area Detector Viewer — Running…' + ) + ) + if hasattr(self, 'btn_pva_setup'): 
+ self.btn_pva_setup.clicked.connect( + lambda: self.launch( + 'pva_setup', + [sys.executable, 'pva_setup/pva_workflow_setup_dialog.py'], + self.btn_pva_setup, + 'PVA Workflow Setup — Running…' + ) + ) + if hasattr(self, 'btn_sim_setup'): + self.btn_sim_setup.clicked.connect( + lambda: self.launch( + 'sim_setup', + [sys.executable, 'consumers/sim_rsm_data.py'], + self.btn_sim_setup, + 'caIOC(Name) — Running…', + quiet=True + ) + ) + if hasattr(self, 'btn_workbench'): + self.btn_workbench.clicked.connect( + lambda: self.launch( + 'workbench', + [sys.executable, 'viewer/workbench/workbench.py'], + self.btn_workbench, + 'Workbench — Running…' + ) + ) + if hasattr(self, 'btn_settings'): + self.btn_settings.clicked.connect( + lambda: self.launch( + 'settings', + [sys.executable, 'viewer/settings/settings_dialog.py'], + self.btn_settings, + 'Settings — Running…' + ) + ) + if hasattr(self, 'btn_exit'): + self.btn_exit.clicked.connect(self.request_close) + if hasattr(self, 'btn_shutdown_all'): + self.btn_shutdown_all.clicked.connect(self._confirm_shutdown_all) + + self._update_status() + + + def _insert_views_section(self): + """Insert a Monitor header and a single Monitor button before Post Analysis Tools.""" + layout = self.layout() + if layout is None: + return + target = getattr(self, 'lbl_post_analysis_header', None) + insert_at = layout.indexOf(target) if target is not None else -1 + if insert_at < 0: + # Fallback: append near end + insert_at = layout.count() + + # Header: Monitor + header = QLabel("Monitor", self) + header.setStyleSheet("font-weight: bold; color: #34495e; font-size: 12px;") + header.setAlignment(Qt.AlignCenter) + layout.insertWidget(insert_at, header) + insert_at += 1 + + # Single Monitor button + btn = QPushButton("Monitor", self) + btn.setToolTip("Open the scan monitor") + layout.insertWidget(insert_at, btn) + insert_at += 1 + + # Bind to existing launch(...) for process tracking + btn.clicked.connect( + lambda _=False: self.launch( + 'monitor_scan', + [sys.executable, 'viewer/scan_view.py'], + btn, + 'Monitor — Running…' + ) + ) + + def _insert_utils_section(self): + """Insert a 'Tools' header and buttons for utility tools (like Metadata Converter) + below the entire 'Post Analysis Tools' section, but above the status and bottom bar.""" + layout = self.layout() + if layout is None: + return + # Compute insertion point: + # Place just before the status label if present, otherwise before the bottom button bar, + # otherwise append at the end. + insert_at = -1 + target_status = getattr(self, 'lbl_status', None) + if target_status is not None: + idx = layout.indexOf(target_status) + if idx >= 0: + insert_at = idx + if insert_at < 0: + target_bar = getattr(self, 'horizontalLayout', None) + if target_bar is not None: + idx = layout.indexOf(target_bar) + if idx >= 0: + insert_at = idx + if insert_at < 0: + insert_at = layout.count() + + # Header + header = QLabel("Tools", self) + header.setStyleSheet("font-weight: bold; color: #34495e; font-size: 12px;") + header.setAlignment(Qt.AlignCenter) + layout.insertWidget(insert_at, header) + insert_at += 1 + + # Metadata Converter button + btn = QPushButton("Metadata Converter", self) + btn.setToolTip("Open the Metadata Converter tool") + layout.insertWidget(insert_at, btn) + insert_at += 1 + + # Bind to existing launch(...) 
for process tracking + btn.clicked.connect( + lambda _=False: self.launch( + 'metadata_converter', + [sys.executable, 'viewer/tools/metadata_converter_gui.py'], + btn, + 'Metadata Converter — Running…' + ) + ) + + def launch(self, key, cmd, button, running_text, quiet=False): + """Start a child process and update UI indicators.""" + if key in self.processes and self.processes[key]['popen'].poll() is None: + # Already running + return + original_text = button.text() + button.setEnabled(False) + button.setText(f"{original_text} — Launching…") + + kwargs = {} + if quiet: + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL + + try: + p = subprocess.Popen(cmd, **kwargs) + button.setText(running_text) + self.processes[key] = { + 'popen': p, + 'button': button, + 'original_text': original_text, + 'running_text': running_text + } + except Exception as e: + QMessageBox.critical( + self, + 'Launch Failed', + f'Failed to launch:\n{" ".join(cmd)}\n\n{e}' + ) + button.setText(original_text) + button.setEnabled(True) + + self._update_status() + + def _poll_processes(self): + """Periodic check for finished processes to restore UI state.""" + finished = [] + for key, entry in self.processes.items(): + p = entry['popen'] + if p.poll() is not None: + # Process ended + entry['button'].setText(entry['original_text']) + entry['button'].setEnabled(True) + finished.append(key) + for key in finished: + self.processes.pop(key, None) + self._update_status() + + def _update_status(self): + """Update status label and button states.""" + count = len(self.processes) + if hasattr(self, 'lbl_status'): + self.lbl_status.setText('No modules running' if count == 0 else f'{count} module(s) running') + if hasattr(self, 'btn_exit'): + # Exit remains enabled; closing will prompt if processes are running + self.btn_exit.setEnabled(True) + if hasattr(self, 'btn_shutdown_all'): + # Enable Shutdown All only when there are running modules + self.btn_shutdown_all.setEnabled(count > 0) + + def _format_running_modules_list(self): + """Return a human-readable list of running modules and their PIDs.""" + lines = [] + for key, entry in self.processes.items(): + p = entry.get('popen') + if p is None or p.poll() is not None: + continue + name = entry.get('running_text', key) + if ' — ' in name: + name = name.split(' — ')[0] + try: + pid = p.pid + except Exception: + pid = 'unknown' + lines.append(f"- {name} (PID {pid})") + if not lines: + return "Running modules:\nNone" + return "Running modules:\n" + "\n".join(lines) + + def _terminate_proc(self, p, timeout=3.0): + """Attempt graceful terminate, then force kill if still alive.""" + try: + if p.poll() is None: + p.terminate() + try: + p.wait(timeout=timeout) + except Exception: + pass + if p.poll() is None: + p.kill() + except Exception: + pass + + def shutdown_all(self): + """Force-stop all running modules and restore UI state.""" + for key, entry in list(self.processes.items()): + self._terminate_proc(entry['popen']) + entry['button'].setText(entry['original_text']) + entry['button'].setEnabled(True) + self.processes.pop(key, None) + self._update_status() + + def _confirm_shutdown_all(self): + """Confirm and force-stop all running modules.""" + count = len(self.processes) + if count == 0: + return + text = f"{self._format_running_modules_list()}\n\nAre you sure you want to force stop all running modules?\n\nData might be lost." 
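For reference, the whole child-process lifecycle used above (`Popen` to start, `poll()` to detect exit, `terminate()` then `kill()` to stop) condensed into a runnable sketch; the child command is a stand-in for a module like `viewer/scan_view.py`:

```python
import subprocess
import sys

# Launch a short-lived child process.
p = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(60)'])
print('running' if p.poll() is None else 'exited', 'PID', p.pid)

# Graceful stop first, hard kill as a fallback, same shape as _terminate_proc.
if p.poll() is None:
    p.terminate()
    try:
        p.wait(timeout=3.0)
    except subprocess.TimeoutExpired:
        pass
    if p.poll() is None:
        p.kill()
print('exit code:', p.wait())
```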
+ resp = QMessageBox.question( + self, + 'Shutdown All Modules', + text, + QMessageBox.Yes | QMessageBox.No, + QMessageBox.No + ) + if resp == QMessageBox.Yes: + self.shutdown_all() + + def request_close(self): + """Prompt to force stop modules before exiting if any are running.""" + try: + any_running = any(entry['popen'].poll() is None for entry in self.processes.values()) + except Exception: + any_running = False + if any_running: + text = f"{self._format_running_modules_list()}\n\nForce stop all and exit?\n\nData might be lost." + resp = QMessageBox.question( + self, + 'Exit Launcher', + text, + QMessageBox.Yes | QMessageBox.No, + QMessageBox.No + ) + if resp == QMessageBox.Yes: + self.shutdown_all() + self.close() + else: + self.close() + + def closeEvent(self, event): + """On close, prompt to force-stop modules if any are running.""" + try: + any_running = any(entry['popen'].poll() is None for entry in self.processes.values()) + except Exception: + any_running = False + if any_running: + text = f"{self._format_running_modules_list()}\n\nForce stop all and exit?\n\nData might be lost." + resp = QMessageBox.question( + self, + 'Exit Launcher', + text, + QMessageBox.Yes | QMessageBox.No, + QMessageBox.No + ) + if resp == QMessageBox.Yes: + self.shutdown_all() + event.accept() + else: + event.ignore() + else: + event.accept() + + +def main(): + app = QApplication(sys.argv) + dlg = LauncherDialog() + dlg.show() + app.exec_() + + +if __name__ == '__main__': + main() diff --git a/viewer/pv_setup_dialog.py b/viewer/pv_setup_dialog.py deleted file mode 100755 index 7558f91..0000000 --- a/viewer/pv_setup_dialog.py +++ /dev/null @@ -1,45 +0,0 @@ -import json -from PyQt5 import uic -from PyQt5.QtCore import Qt -from PyQt5.QtWidgets import QDialog, QFileDialog, QSizePolicy, QLabel, QFormLayout, QWidget, QFrame - - -class PVSetupDialog(QDialog): - def __init__(self, parent, file_mode, path=None): - super(PVSetupDialog,self).__init__(parent) - uic.loadUi('gui/edit_add_config_dialog.ui',self) - self.config_dict = {} - self.path = path - self.file_mode = file_mode - - self.form_widget = QWidget() - self.config_layout = QFormLayout(parent=self.form_widget) - self.config_layout.setLabelAlignment(Qt.AlignRight) - self.scroll_area.setWidget(self.form_widget) - self.scroll_area.setWidgetResizable(True) - - self.load_config() - self.show() - - def save_file_dialog(self): - path, _ = QFileDialog.getSaveFileName(self, 'Save File', 'pv_configs', '.json (*.json)') - return path - - def load_config(self): - if self.file_mode == 'w': - return - with open(self.path, "r") as config_json: - self.config_dict: dict = json.load(config_json) - for key, value in self.config_dict.items(): - # set the label part of the form widget - label = QLabel(key + ':') - label.setMinimumHeight(35) - label.setSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Minimum) - # set the field part of the form widget - field = QLabel(value) - field.setMinimumHeight(35) - field.setSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Minimum) - field.setFrameShape(QFrame.Shape.Box) - field.setFrameShadow(QFrame.Shadow.Sunken) - - self.config_layout.addRow(label, field) \ No newline at end of file diff --git a/viewer/pva_reader.py b/viewer/pva_reader.py deleted file mode 100644 index 4948c4c..0000000 --- a/viewer/pva_reader.py +++ /dev/null @@ -1,282 +0,0 @@ -import toml -import numpy as np -import pvaccess as pva -import bitshuffle -import blosc2 -import lz4.block -from epics import camonitor, caget - -class PVAReader: - def 
__init__(self, input_channel='s6lambda1:Pva1:Image', provider=pva.PVA, config_filepath: str = 'pv_configs/metadata_pvs.toml'): - """ - Initializes the PVA Reader for monitoring connections and handling image data. - - Args: - input_channel (str): Input channel for the PVA connection. - provider (protocol): The protocol for the PVA channel. - config_filepath (str): File path to the configuration TOML file. - """ - # Each PVA ScalarType is enumerated in C++ starting 1-10 - # This means we map them as numbers to a numpy datatype which we parse from pva codec parameters - # Then use this to correctly decompress the image depending on the codec used - self.NUMPY_DATA_TYPE_MAP = { - pva.UBYTE : np.dtype('uint8'), - pva.BYTE : np.dtype('int8'), - pva.USHORT : np.dtype('uint16'), - pva.SHORT : np.dtype('int16'), - pva.UINT : np.dtype('uint32'), - pva.INT : np.dtype('int32'), - pva.ULONG : np.dtype('uint64'), - pva.LONG : np.dtype('int64'), - pva.FLOAT : np.dtype('float32'), - pva.DOUBLE : np.dtype('float64') - } - # This also means we can parse the pva codec parameters to show the correct datatype in viewer - self. NTNDA_DATA_TYPE_MAP = { - pva.UBYTE : 'ubyteValue', - pva.BYTE : 'byteValue', - pva.USHORT : 'ushortValue', - pva.SHORT : 'shortValue', - pva.UINT : 'uintValue', - pva.INT : 'intValue', - pva.ULONG : 'ulongValue', - pva.LONG : 'longValue', - pva.FLOAT : 'floatValue', - pva.DOUBLE : 'doubleValue', - } - - self.input_channel = input_channel - self.provider = provider - self.config_filepath = config_filepath - self.channel = pva.Channel(self.input_channel, self.provider) - self.pva_prefix = input_channel.split(":")[0] - # variables that will store pva data - self.pva_object = None - self.image = None - self.shape = (0,0) - self.pixel_ordering = 'F' - self.image_is_transposed = False - self.attributes = [] - self.timestamp = None - self.data_type = None - self.display_dtype = None - # variables used for parsing analysis PV - self.analysis_index = None - self.analysis_exists = False - self.analysis_attributes = {} - # variables used for later logic - self.last_array_id = None - self.frames_missed = 0 - self.frames_received = 0 - self.id_diff = 0 - # variables used for ROI and Stats PVs from config - self.config = {} - self.rois = {} - self.stats = {} - - if self.config_filepath != '': - with open(self.config_filepath, 'r') as toml_file: - # loads the pvs in the toml file into a python dictionary - self.config:dict = toml.load(toml_file) - self.stats:dict = self.config["STATS"] - if self.config["CONSUMER_TYPE"] == "spontaneous": - # TODO: change to dictionaries that store postions as keys and pv as value - self.analysis_cache_dict = {"Intensity": {}, - "ComX": {}, - "ComY": {}, - "Position": {}} - - def pva_callbackSuccess(self, pv) -> None: - """ - Callback for handling monitored PV changes. - - Args: - pv (PvaObject): The PV object received by the channel monitor. 
- """ - self.pva_object = pv - self.parse_image_data_type() - self.pva_to_image() - self.parse_pva_attributes() - self.parse_roi_pvs() - if (self.analysis_index is None) and (not(self.analysis_exists)): #go in with the assumption analysis Doesn't Exist, is changed to True otherwise - self.analysis_index = self.locate_analysis_index() - # Only runs if an analysis index was found - if self.analysis_exists: - self.analysis_attributes = self.attributes[self.analysis_index] - if self.config["CONSUMER_TYPE"] == "spontaneous": - # turns axis1 and axis2 into a tuple - incoming_coord = (self.analysis_attributes["value"][0]["value"].get("Axis1", 0.0), - self.analysis_attributes["value"][0]["value"].get("Axis2", 0.0)) - # use a tuple as a key so that we can check if there is a repeat position - self.analysis_cache_dict["Intensity"].update({incoming_coord: self.analysis_cache_dict["Intensity"].get(incoming_coord, 0) + self.analysis_attributes["value"][0]["value"].get("Intensity", 0.0)}) - self.analysis_cache_dict["ComX"].update({incoming_coord: self.analysis_cache_dict["ComX"].get(incoming_coord, 0) + self.analysis_attributes["value"][0]["value"].get("ComX", 0.0)}) - self.analysis_cache_dict["ComY"].update({incoming_coord:self.analysis_cache_dict["ComY"].get(incoming_coord, 0) + self.analysis_attributes["value"][0]["value"].get("ComY", 0.0)}) - # double storing of the postion, will find out if needed - self.analysis_cache_dict["Position"][incoming_coord] = incoming_coord - - def roi_backup_callback(self, pvname, value, **kwargs): - name_components = pvname.split(":") - roi_key = name_components[1] - pv_key = name_components[2] - pv_value = value - # can't append simply by using 2 keys in a row (self.rois[roi_key][pv_key]), there must be an inner dict to call - # then adds the key to the inner dictionary with update - self.rois.setdefault(roi_key, {}).update({pv_key: pv_value}) - - def parse_image_data_type(self) -> None: - """ - Parses the PVA Object to determine the incoming data type. - """ - if self.pva_object is not None: - try: - self.data_type = list(self.pva_object['value'][0].keys())[0] - self.display_dtype = self.data_type if self.pva_object['codec']['name'] == '' else self.NTNDA_DATA_TYPE_MAP.get(self.pva_object['codec']['parameters'][0]['value']) - - except: - self.display_dtype = "could not detect" - - def parse_pva_attributes(self) -> None: - """ - Converts the PVA object to a Python dictionary and extracts its attributes. - """ - if self.pva_object is not None: - self.attributes: list = self.pva_object.get().get("attribute", []) - - def locate_analysis_index(self) -> int|None: - """ - Locates the index of the analysis attribute in the PVA attributes. - - Returns: - int: The index of the analysis attribute or None if not found. - """ - if self.attributes: - for i in range(len(self.attributes)): - attr_pv: dict = self.attributes[i] - if attr_pv.get("name", "") == "Analysis": - self.analysis_exists = True - return i - else: - return None - - def parse_roi_pvs(self) -> None: - """ - Parses attributes to extract ROI-specific PV information. 
- """ - if self.attributes: - for i in range(len(self.attributes)): - attr_pv: dict = self.attributes[i] - attr_name:str = attr_pv.get("name", "") - if "ROI" in attr_name: - name_components = attr_name.split(":") - prefix = name_components[0] - roi_key = name_components[1] - pv_key = name_components[2] - pv_value = attr_pv["value"][0]["value"] - # can't append simply by using 2 keys in a row, there must be a value to call to then add to - # then adds the key to the inner dictionary with update - self.rois.setdefault(roi_key, {}).update({pv_key: pv_value}) - - def pva_to_image(self) -> None: - """ - Converts the PVA Object to an image array and determines if a frame was missed. - Handles bslz4 and lz4 compressed image data. - """ - try: - if 'dimension' in self.pva_object: - self.shape = tuple([dim['size'] for dim in self.pva_object['dimension']]) - - if self.pva_object['codec']['name'] == 'bslz4': - # Handle BSLZ4 compressed data - dtype = self.NUMPY_DATA_TYPE_MAP.get(self.pva_object['codec']['parameters'][0]['value']) - uncompressed_size = self.pva_object['uncompressedSize'] // dtype.itemsize # size has to be divided by bytes needed to store dtype in bitshuffle - uncompressed_shape = (uncompressed_size,) - compressed_image = self.pva_object['value'][0][self.data_type] - # Decompress numpy array to correct datatype - self.image = bitshuffle.decompress_lz4(compressed_image, uncompressed_shape, dtype, 0) - - elif self.pva_object['codec']['name'] == 'lz4': - # Handle LZ4 compressed data - dtype = self.NUMPY_DATA_TYPE_MAP.get(self.pva_object['codec']['parameters'][0]['value']) - uncompressed_size = self.pva_object['uncompressedSize'] # raw size is used to decompress it into an lz4 buffer - compressed_image = self.pva_object['value'][0][self.data_type] - # Decompress using lz4.block - decompressed_bytes = lz4.block.decompress(compressed_image, uncompressed_size) - # Convert bytes to numpy array with correct dtype - self.image = np.frombuffer(decompressed_bytes, dtype=dtype) # dtype is used to convert from buffer to correct dtype from bytes - - elif self.pva_object['codec']['name'] == '': - # Handle uncompressed data - self.image = np.array(self.pva_object['value'][0][self.data_type]) - - self.image = self.image.reshape(self.shape, order=self.pixel_ordering).T if self.image_is_transposed else self.image.reshape(self.shape, order=self.pixel_ordering) - self.frames_received += 1 - else: - self.image = None - - # Check for missed frame starts here - current_array_id = self.pva_object['uniqueId'] - if self.last_array_id is not None: - self.id_diff = current_array_id - self.last_array_id - 1 - if (self.id_diff > 0): - self.frames_missed += self.id_diff - else: - self.id_diff = 0 - self.last_array_id = current_array_id - except Exception as e: - print(f"Failed to process image: {e}") - self.frames_missed += 1 - - def start_channel_monitor(self) -> None: - """ - Subscribes to the PVA channel with a callback function and starts monitoring for PV changes. 
- """ - self.channel.subscribe('pva callback success', self.pva_callbackSuccess) - self.channel.startMonitor() - - def start_roi_backup_monitor(self) -> None: - try: - for roi_num, roi_dict in self.config['ROI'].items(): - for config_key, pv_name in roi_dict.items(): - name_components = pv_name.split(":") - - roi_key = name_components[1] # ROI1-ROI4 - pv_key = name_components[2] # MinX, MinY, SizeX, SizeY - - self.rois.setdefault(roi_key, {}).update({pv_key: caget(pv_name)}) - camonitor(pvname=pv_name, callback=self.roi_backup_callback) - except Exception as e: - print(f'Failed to setup backup ROI monitor: {e}') - - def stop_channel_monitor(self) -> None: - """ - Stops all monitoring and callback functions. - """ - self.channel.unsubscribe('pva callback success') - self.channel.stopMonitor() - - def get_frames_missed(self) -> int: - """ - Returns the number of frames missed. - - Returns: - int: The number of missed frames. - """ - return self.frames_missed - - def get_pva_image(self) -> np.ndarray: - """ - Returns the current PVA image. - - Returns: - numpy.ndarray: The current image array. - """ - return self.image - - def get_attributes_dict(self) -> list[dict]: - """ - Returns the attributes of the current PVA object. - - Returns: - list: The attributes of the current PVA object. - """ - return self.attributes diff --git a/viewer/roi_cropping.py b/viewer/roi_cropping.py new file mode 100755 index 0000000..f1442fd --- /dev/null +++ b/viewer/roi_cropping.py @@ -0,0 +1,173 @@ +import pvaccess as pva +from epics import camonitor, caget +import numpy as np +import matplotlib.pyplot as plt + +CROP_PADDING = 0 +ROI = 'ROI2' +PVA = 'Pva1' +TEST_ROW = 69 + +class ROICropping: + """ + Crop an image based on the ROI + """ + def __init__(self): + self.channel : pva.Channel = None + self.pva_obj : pva.PVObject = None + + # size + self.image : np.ndarray = None + self.shape : tuple = (0,0) + self.shaped_img : np.ndarray = None + + # cropping + self.cropped_image : np.ndarray = None + + # minx,maxx,miny,maxy + self.crop_size : tuple = (0,0,0,0) + self.cropped_col_avg : float = 0.0 + self.cropped_row_avg : float = 0.0 + + + def crop_img(self) -> None: + """ + This function crops the Pva1 to the size of the ROI + and displays it on matplotlib + + """ + + self.get_image() + self.get_roi() + self.shape_image() + self.crop_shaped_image(ROI_NUM=3) + self.calc_average() + + # DEBUG + # print(f'\ + # {PVA} Image: {self.image}\ + # {ROI}: {self.roi_data}\ + # Image Size: {self.shape}\ + # Crop Size: {self.crop_size}') + # print(f'\ + # Before Crop: {self.image}\ + # After Crop: {self.cropped_image}\n\n\ + # Average Column: {self.cropped_col_avg}\n\n\ + # Average Row: {self.cropped_row_avg}\ + # ') + print(type(self.shaped_img)) + self.display_image() + + + + def get_image(self) -> None: + # Gets a single image + self.channel = pva.Channel(f'dp-ADSim:{PVA}:Image', pva.PVA) + + # The fields I want to be visible + self.pva_obj = self.channel.get('field(value,dimension,timeStamp,uniqueId)') + + # Get and set the image from the dictionary + self.image = self.pva_obj['value'][0]['ubyteValue'] + + + + def get_roi(self) -> None: + # Get the ROI of that single image + self.roi_data = caget(f'dp-ADSim:{ROI}:MinX') + + + + def shape_image(self) -> None: + """ + Turns the PVAObject into an image + + Return: None + """ + # Check if dimensions are in image + if 'dimension' in self.pva_obj: + # grab the shape and store them in a tuple + self.shape = tuple([dim['size'] for dim in self.pva_obj['dimension']]) + + # Reshape into 
a 2d image
+            self.shaped_img = np.array(self.image).reshape(self.shape, order='F')
+
+        else:
+            print(f'Dimension not in {PVA} object')
+
+
+    def crop_shaped_image(self, ROI_NUM:int = None):
+        """
+        Crops the shaped_img to the specific ROI's size
+
+        Args:
+            ROI_NUM (int): The specific ROI to crop to
+        """
+
+        # There is an ROI provided
+        if ROI_NUM:
+            # Get the ROI's dimension
+            min_x = caget(f'dp-ADSim:ROI{ROI_NUM}:MinX')
+            min_y = caget(f'dp-ADSim:ROI{ROI_NUM}:MinY')
+            max_x = caget(f'dp-ADSim:ROI{ROI_NUM}:SizeX')
+            max_y = caget(f'dp-ADSim:ROI{ROI_NUM}:SizeY')
+
+            # Slice the needed ROI dimensions from the image
+            self.cropped_image = self.shaped_img[min_x:min_x+max_x, min_y:min_y+max_y]
+
+        # If not provided, use the image's dimensions
+        else:
+            # reversed() alone returns an iterator, so materialize it as a tuple
+            self.crop_size = tuple(reversed(self.shaped_img.shape))
+            self.cropped_image = self.shaped_img
+
+
+    def calc_average(self) -> None:
+        """
+        Calculate the column and row averages of the cropped image
+        """
+        # Average of each column in the cropped image
+        self.cropped_col_avg = np.mean(self.cropped_image, 0)
+        # Average of each row in the cropped image
+        self.cropped_row_avg = np.mean(self.cropped_image, 1)
+
+
+    def display_image(self):
+        """
+        Display data using matplotlib
+        """
+
+        # Pixel coordinate axes for the profile plots
+        X = np.arange(0, self.cropped_image.shape[1], 1.0)
+        Y = np.arange(0, self.cropped_image.shape[0], 1.0)
+
+        # Used to plot multiple graphs
+        figure, axis = plt.subplots(2, 2)
+
+        # Plot the graphs on the subplots
+        # Top left
+        axis[0,0].title.set_text('Cropped ROI Image')
+        axis[0,0].imshow(self.cropped_image)
+
+        # Top right
+        axis[0,1].title.set_text('Row Average')
+        axis[0,1].plot(self.cropped_row_avg, Y)
+        axis[0,1].invert_yaxis()
+
+        # Bottom left
+        axis[1,0].title.set_text('Column Average')
+        axis[1,0].plot(X, self.cropped_col_avg)
+
+        # Bottom right
+        axis[1,1].title.set_text('Full Image')
+        axis[1,1].imshow(self.shaped_img)
+
+        plt.show()
+
+
+# Call and start function
+roi_cropping = ROICropping()
+roi_cropping.crop_img()
diff --git a/viewer/scan_view.py b/viewer/scan_view.py
new file mode 100644
index 0000000..e3243b9
--- /dev/null
+++ b/viewer/scan_view.py
@@ -0,0 +1,436 @@
+import sys
+import toml
+from datetime import datetime
+from PyQt5 import uic
+from PyQt5.QtCore import QThread, pyqtSignal, QTimer, Qt
+from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QVBoxLayout
+import pyqtgraph as pg
+
+from utils import PVAReader, HDF5Writer
+from epics import caput
+
+class ScanMonitorWindow(QMainWindow):
+    signal_start_monitor = pyqtSignal()
+
+    def __init__(self, channel: str = "", config_filepath: str = ""):
+        super(ScanMonitorWindow, self).__init__()
+        uic.loadUi('gui/scan_view.ui', self)
+        # Title comes from UI; ensure consistent naming in code comments
+
+        self.channel = channel
+        self.config_filepath = config_filepath
+        self.scan_state = False
+
+        # Track applied state for UI labels
+        self.applied_channel = None
+        self.applied_config = None
+        self._last_frames_received = 0
+
+        # Define Threads
+        self.reader_thread = QThread()
+        self.writer_thread = QThread()
+
+        self.reader: PVAReader = None
+        self.h5_handler: HDF5Writer = None
+
+        # Timer for updating info display
+        self.info_timer = QTimer()
+        self.info_timer.timeout.connect(self._update_info_display)
+
+        # Graph state
+        self.graph_plot = None
+        self.graph_curve = None
+        self.graph_x = []
+        self.graph_y = []
+        self._frames_baseline = 0
+        self.graph_window_seconds = 60  # sliding window length; newest point centered
+        # Separate timeline for activity monitor (distinct from actual scan time)
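The graph-state fields above feed a sliding-window series: elapsed seconds on X, cumulative frames minus a baseline on Y, trimmed to the most recent `graph_window_seconds`. An illustrative, GUI-free sketch of that bookkeeping (the class name `ActivitySeries` is mine, not the repo's):

```python
from datetime import datetime

class ActivitySeries:
    def __init__(self, window_seconds: float = 60.0, baseline: int = 0):
        self.window_seconds = window_seconds
        self.baseline = baseline
        self.start = datetime.now()
        self.xs, self.ys = [], []

    def sample(self, frames_received: int):
        t = (datetime.now() - self.start).total_seconds()
        self.xs.append(t)
        self.ys.append(frames_received - self.baseline)
        # Drop points that scrolled out of the window.
        while self.xs and self.xs[0] < t - self.window_seconds:
            self.xs.pop(0)
            self.ys.pop(0)
        return self.xs, self.ys

series = ActivitySeries(window_seconds=60.0, baseline=0)
for n in (10, 25, 42):
    xs, ys = series.sample(n)
print(ys)  # [10, 25, 42]
```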
self.activity_start_time = None + # Track how long the monitor has been actively listening + self.listening_start_time = None + + # Scan timing variables + self.scan_start_time = None + self.scan_end_time = None + self.last_scan_completion_time = None + + # Setup Initial UI State + self._setup_ui_elements() + self._setup_graph() + + def _setup_ui_elements(self): + if hasattr(self, 'label_mode'): + self.label_mode.setText("") + if hasattr(self, 'label_indicator'): + self.label_indicator.setText('scan: off') + self._apply_indicator_style() + if hasattr(self, 'label_listening'): + # Initialize listening elapsed time display as mm:ss + self.label_listening.setText('00:00') + self._apply_listening_style(False) + + self.lineedit_channel.setText(self.channel or "") + self.lineedit_channel.textChanged.connect(self._on_channel_changed) + self.lineedit_config.setText(self.config_filepath or "") + self.lineedit_config.textChanged.connect(self._on_config_path_changed) + + self.btn_browse_config.clicked.connect(self._on_browse_config_clicked) + self.btn_apply.clicked.connect(self._on_apply_clicked) + + self._update_info_display() + + def _setup_graph(self): + """Initialize the PyQtGraph PlotWidget inside the placeholder widget.""" + try: + if hasattr(self, 'widget_graph') and self.widget_graph is not None: + # Create a layout for the placeholder widget if it doesn't have one + layout = self.widget_graph.layout() if hasattr(self.widget_graph, 'layout') else None + if layout is None: + layout = QVBoxLayout(self.widget_graph) + self.widget_graph.setLayout(layout) + + # Create and configure the plot + self.graph_plot = pg.PlotWidget(background='w') + self.graph_plot.showGrid(x=True, y=True) + self.graph_plot.setLabel('bottom', 'Time', units='s') + self.graph_plot.setLabel('left', 'Frames collected') + # Keep Y auto-range; control X via sliding window + try: + self.graph_plot.enableAutoRange(x=False, y=True) + except Exception: + pass + self.graph_curve = self.graph_plot.plot([], [], pen=pg.mkPen(color=(30, 144, 255), width=2)) + + # Center indicator line; we'll place it at the center of the X range (latest time) + try: + self.center_line = pg.InfiniteLine(angle=90, movable=False, pen=pg.mkPen(color=(128, 128, 128), style=Qt.DashLine)) + self.graph_plot.addItem(self.center_line) + except Exception: + self.center_line = None + + layout.addWidget(self.graph_plot) + self._reset_graph() + except Exception as e: + print(f"Graph setup error: {e}") + + # ================================================================================================ + # CORE LOGIC & THREADING + # ================================================================================================ + + def _on_apply_clicked(self) -> None: + """Initializes Reader and Writer on separate threads.""" + if not self.channel or not self.config_filepath: + return + + self._cleanup_existing_instances() + + try: + # 1. Create instances + self.reader = PVAReader( + input_channel=self.channel, + config_filepath=self.config_filepath, + viewer_type='image' + ) + + self.h5_handler = HDF5Writer( + file_path="", + pva_reader=self.reader + ) + + # 2. Move to specific worker threads + self.reader.moveToThread(self.reader_thread) + self.h5_handler.moveToThread(self.writer_thread) + + # 3. 
Connect Signals with QueuedConnection to bridge thread boundaries + self.reader.reader_scan_complete.connect(self._on_reader_scan_complete, Qt.QueuedConnection) + self.h5_handler.hdf5_writer_finished.connect(self._on_writer_finished, Qt.QueuedConnection) + self.signal_start_monitor.connect(self.reader.start_channel_monitor, Qt.QueuedConnection) + + if hasattr(self.reader, 'scan_state_changed'): + self.reader.scan_state_changed.connect(self._on_scan_state_changed, Qt.QueuedConnection) + + # 4. Start Thread Event Loops + self.reader_thread.start() + self.writer_thread.start() + + # 5. Begin Monitoring + self.signal_start_monitor.emit() + + # 6. Update UI Tracking + self.applied_channel = self.channel + self.applied_config = self.config_filepath + if hasattr(self, 'label_listening'): + self.label_listening.setText('True') + self._apply_listening_style(True) + + self.info_timer.start(1000) + self._update_info_display() + # For continuous monitoring, start a fresh graph timeline on apply + self._reset_graph() + # Initialize a timeline for the activity monitor (separate from scan time) + self.activity_start_time = datetime.now() + # Reset frames baseline to current reader count if available + try: + self._frames_baseline = int(getattr(self.reader, 'frames_received', 0) or 0) + except Exception: + self._frames_baseline = 0 + + except Exception as e: + print(f"Apply Error: {e}") + self.reader = None + self.h5_handler = None + + def _on_reader_scan_complete(self) -> None: + """Slot executed when PVAReader emits the completion signal.""" + print(f"LOG: reader_scan_complete received by ScanMonitor at {datetime.now()}") + self._trigger_automatic_save() + + def _trigger_automatic_save(self) -> None: + """Triggers the HDF5Writer save process.""" + if self.h5_handler: + print("LOG: Triggering HDF5Writer.save_caches_to_h5...") + # This method runs in the writer_thread due to moveToThread earlier + self.h5_handler.save_caches_to_h5(clear_caches=True, compress=True) + + def _on_writer_finished(self, message: str) -> None: + """Callback when the HDF5 file is finished writing.""" + print(f"LOG: Writer finished - {message}") + if hasattr(self, 'label_indicator'): + self.label_indicator.setText('scan: off') + self._apply_indicator_style() + self.scan_state = False + self._update_button_states() + + def _on_stop_scan_clicked(self) -> None: + if self.reader is None: return + try: + if getattr(self.reader, 'FLAG_PV', ''): + caput(self.reader.FLAG_PV, self.reader.STOP_SCAN) + else: + self.reader.stop_channel_monitor() + # If no flag PV triggers the reader, we trigger the complete sequence manually + self._on_reader_scan_complete() + except Exception as e: + print(f"Manual Stop Error: {e}") + + def _on_start_scan_clicked(self) -> None: + if self.reader: + self.signal_start_monitor.emit() + + def _cleanup_existing_instances(self) -> None: + if self.reader is not None: + try: + self.reader.stop_channel_monitor() + self.reader.reader_scan_complete.disconnect() + self.h5_handler.hdf5_writer_finished.disconnect() + except: pass + + self.reader_thread.quit() + self.reader_thread.wait() + self.writer_thread.quit() + self.writer_thread.wait() + + self.reader = None + self.h5_handler = None + + # ================================================================================================ + # UI STYLING & UPDATES + # ================================================================================================ + + def _on_channel_changed(self, text): + self.channel = text + self.applied_channel = None + if hasattr(self, 
'label_listening'): + # Reset listening timer when channel changes + self.listening_start_time = None + self.label_listening.setText('0') + self._apply_listening_style(False) + + def _on_config_path_changed(self, text): + self.config_filepath = text + self.applied_config = None + if hasattr(self, 'label_listening'): + # Reset listening timer when config changes + self.listening_start_time = None + self.label_listening.setText('0') + self._apply_listening_style(False) + + def _on_browse_config_clicked(self): + fname, _ = QFileDialog.getOpenFileName(self, 'Select Config', '', 'TOML (*.toml)') + if fname: self.lineedit_config.setText(fname) + + def _on_scan_state_changed(self, is_on: bool) -> None: + if is_on: + self.scan_start_time = datetime.now() + self.scan_end_time = None + self._reset_graph() + else: + self.scan_end_time = datetime.now() + self.last_scan_completion_time = self.scan_end_time + + self.scan_state = is_on + if hasattr(self, 'label_indicator'): + self.label_indicator.setText('scan: on' if is_on else 'scan: off') + self._apply_indicator_style() + self._update_button_states() + + def _apply_indicator_style(self): + if hasattr(self, 'label_indicator'): + color = "green" if "on" in self.label_indicator.text().lower() else "red" + self.label_indicator.setStyleSheet(f'color: {color}; font-weight: bold;') + + def _apply_listening_style(self, state): + if hasattr(self, 'label_listening'): + color = "green" if state else "red" + self.label_listening.setStyleSheet(f'color: {color}; font-weight: bold;') + + def _update_button_states(self): + if hasattr(self, 'btn_start_scan'): self.btn_start_scan.setEnabled(not self.scan_state) + if hasattr(self, 'btn_stop_scan'): self.btn_stop_scan.setEnabled(self.scan_state) + + def _update_label_from_config(self): + if hasattr(self, 'label_mode'): self.label_mode.setText("") + + def _update_info_display(self): + """Logic for periodically refreshing UI labels based on Reader state.""" + try: + # Update Caching Mode + caching_mode = "Not set" + if self.config_filepath: + try: + with open(self.config_filepath, 'r') as f: + cfg = toml.load(f) + caching_mode = cfg.get('CACHE_OPTIONS', {}).get('CACHING_MODE', 'Not set') + except: pass + if hasattr(self, 'label_caching_mode'): self.label_caching_mode.setText(str(caching_mode)) + + # Update Flag PV + flag_pv = "Not set" + if self.reader and hasattr(self.reader, 'FLAG_PV'): + flag_pv = str(self.reader.FLAG_PV) if self.reader.FLAG_PV else "Not set" + if hasattr(self, 'label_flag_pv'): self.label_flag_pv.setText(flag_pv) + + # Update Monitor Activity + channel_active = "No" + is_listening = False + if self.reader and hasattr(self.reader, 'channel'): + is_active = bool(self.reader.channel.isMonitorActive()) + channel_active = "Yes" if is_active else "No" + is_listening = is_active and (self.applied_channel == self.channel) + + if hasattr(self, 'label_channel_active'): + self.label_channel_active.setText(channel_active) + + # Update Listening label to show elapsed listening time (positive integers) + if hasattr(self, 'label_listening'): + if is_listening: + if self.listening_start_time is None: + self.listening_start_time = datetime.now() + elapsed = int(max(0, (datetime.now() - self.listening_start_time).total_seconds())) + # Format as mm:ss + m, s = divmod(elapsed, 60) + self.label_listening.setText(f"{m:02d}:{s:02d}") + else: + # Reset when not listening + self.listening_start_time = None + self.label_listening.setText('00:00') + self._apply_listening_style(is_listening) + + # Update Timing + if 
self.scan_start_time: + duration = (self.scan_end_time if self.scan_end_time else datetime.now()) - self.scan_start_time + s = int(duration.total_seconds()) + m, s = divmod(s, 60); h, m = divmod(m, 60) + time_str = f"{h:02d}:{m:02d}:{s:02d}" + if not self.scan_end_time: time_str += " (running)" + if hasattr(self, 'label_scan_time'): self.label_scan_time.setText(time_str) + + if self.last_scan_completion_time and hasattr(self, 'label_last_scan_date'): + self.label_last_scan_date.setText(self.last_scan_completion_time.strftime("%Y-%m-%d %H:%M:%S")) + # Update graph after refreshing labels + self._update_graph() + except: pass + + def _reset_graph(self): + self.graph_x = [] + self.graph_y = [] + if self.graph_curve: + self.graph_curve.setData([], []) + # Reset baseline when graph resets + try: + self._frames_baseline = int(getattr(self.reader, 'frames_received', 0) or 0) + except Exception: + self._frames_baseline = 0 + # Restart activity monitor timeline + self.activity_start_time = datetime.now() + + def _update_graph(self): + """Append the latest frame count against elapsed time and update the curve.""" + try: + if not self.graph_curve or not self.graph_plot: + return + # Use activity monitor time (separate from actual scan time) + if self.activity_start_time and self.reader is not None: + # Only update when monitor is actively listening + is_active = False + is_listening = False + try: + if hasattr(self.reader, 'channel') and self.reader.channel is not None: + is_active = bool(self.reader.channel.isMonitorActive()) + # Listening indicates we applied the current channel successfully + is_listening = (self.applied_channel == self.channel) and is_active + except Exception: + is_active = False + is_listening = False + if not (is_active and is_listening): + return + t = int(max(0, (datetime.now() - self.activity_start_time).total_seconds())) + # Prefer frames collected during active caching; fallback to total frames_received + frames_total = getattr(self.reader, 'frames_received', None) + if frames_total is None: + return + # Continuous monitor: always update regardless of scan_state + self.graph_x.append(t) + # Plot delta frames collected since baseline + try: + delta = int(frames_total) - int(self._frames_baseline) + self.graph_y.append(max(0, int(delta))) + except Exception: + self.graph_y.append(int(max(0, frames_total))) + # Keep last N points to avoid excessive memory (e.g., last 600 seconds) + max_points = 600 + if len(self.graph_x) > max_points: + self.graph_x = self.graph_x[-max_points:] + self.graph_y = self.graph_y[-max_points:] + self.graph_curve.setData(self.graph_x, self.graph_y) + + # Sliding window: keep newest time centered; move the view range accordingly + try: + half = self.graph_window_seconds / 2.0 + # Clamp x_min to 0 to avoid negative time display + x_min = max(0.0, float(t) - half) + x_max = x_min + self.graph_window_seconds + self.graph_plot.setXRange(x_min, x_max, padding=0) + if self.center_line: + self.center_line.setPos(float(t)) + except Exception as _: + pass + except Exception as e: + print(f"Graph update error: {e}") + + def closeEvent(self, event): + self.info_timer.stop() + self._cleanup_existing_instances() + super().closeEvent(event) + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser(description='Scan Monitor Window') + parser.add_argument('--channel', default='', help='PVA channel name') + parser.add_argument('--config', dest='config_path', default='', help='Path to TOML config file') + args = parser.parse_args() + + app = 
QApplication(sys.argv) + window = ScanMonitorWindow(channel=args.channel, config_filepath=args.config_path) + window.show() + sys.exit(app.exec_()) \ No newline at end of file diff --git a/viewer/settings/settings_dialog.py b/viewer/settings/settings_dialog.py new file mode 100644 index 0000000..903ca9d --- /dev/null +++ b/viewer/settings/settings_dialog.py @@ -0,0 +1,22 @@ +import sys +from PyQt5 import QtWidgets, uic + + +class SettingsDialog(QtWidgets.QDialog): + def __init__(self, parent=None): + super(SettingsDialog, self).__init__(parent) + # Load the placeholder UI + uic.loadUi('gui/settings/settings_dialog.ui', self) + + # Wire dialog buttons if present + button_box = getattr(self, 'buttonBox', None) + if button_box is not None: + button_box.accepted.connect(self.accept) + button_box.rejected.connect(self.reject) + + +if __name__ == '__main__': + app = QtWidgets.QApplication(sys.argv) + dlg = SettingsDialog() + dlg.show() + sys.exit(app.exec_()) diff --git a/viewer/tools/metadata_converter_gui.py b/viewer/tools/metadata_converter_gui.py new file mode 100644 index 0000000..bf4f00f --- /dev/null +++ b/viewer/tools/metadata_converter_gui.py @@ -0,0 +1,186 @@ +import sys +import os +from pathlib import Path +from typing import List + +from PyQt5 import uic +from PyQt5.QtWidgets import ( + QApplication, QDialog, QFileDialog, QMessageBox +) + +import h5py + +from utils.metadata_converter import convert_files_or_dir + + +def is_already_formatted(h5_path: Path, base_group: str) -> bool: + """Return True if base_group/HKL exists AND there is at least one dataset named + 'NAME' under base_group/HKL/motor_positions (recursively). + """ + try: + with h5py.File(str(h5_path), 'r') as h5: + hkl_group = f"{base_group}/HKL" + if hkl_group not in h5: + return False + motor_root = f"{hkl_group}/motor_positions" + if motor_root not in h5: + return False + + found_name = False + + def visitor(name, obj): + nonlocal found_name + if found_name: + return + try: + # Check leaf name equals 'NAME' and object is a dataset + leaf = name.split('/')[-1] + if leaf == 'NAME' and isinstance(obj, h5py.Dataset): + found_name = True + except Exception: + pass + + h5[motor_root].visititems(visitor) + return found_name + except Exception: + return False + + +class MetadataConverterDialog(QDialog): + def __init__(self): + super().__init__() + uic.loadUi('gui/tools/metadata_converter.ui', self) + + # Wire up buttons + if hasattr(self, 'btn_browse_hdf5_file'): + self.btn_browse_hdf5_file.clicked.connect(self._browse_hdf5_file) + if hasattr(self, 'btn_browse_hdf5_dir'): + self.btn_browse_hdf5_dir.clicked.connect(self._browse_hdf5_dir) + if hasattr(self, 'btn_browse_toml'): + self.btn_browse_toml.clicked.connect(self._browse_toml) + if hasattr(self, 'btn_convert'): + self.btn_convert.clicked.connect(self._convert) + if hasattr(self, 'btn_close'): + self.btn_close.clicked.connect(self.close) + + # Defaults (ensure they exist if UI changed) + if hasattr(self, 'txt_base_group') and not self.txt_base_group.text(): + self.txt_base_group.setText('entry/data/metadata') + if hasattr(self, 'chk_include'): + self.chk_include.setChecked(True) + if hasattr(self, 'chk_in_place'): + self.chk_in_place.setChecked(True) + + # ---------- Browsers ---------- + def _browse_hdf5_file(self): + fname, _ = QFileDialog.getOpenFileName(self, 'Select HDF5 file', '', 'HDF5 Files (*.h5 *.hdf5);;All Files (*)') + if fname and hasattr(self, 'txt_hdf5_path'): + self.txt_hdf5_path.setText(fname) + + def _browse_hdf5_dir(self): + dname = 
QFileDialog.getExistingDirectory(self, 'Select directory containing HDF5 files', '') + if dname and hasattr(self, 'txt_hdf5_path'): + self.txt_hdf5_path.setText(dname) + + def _browse_toml(self): + fname, _ = QFileDialog.getOpenFileName(self, 'Select TOML mapping file', '', 'TOML Files (*.toml);;All Files (*)') + if fname and hasattr(self, 'txt_toml_path'): + self.txt_toml_path.setText(fname) + + # ---------- Conversion ---------- + def _append_log(self, text: str): + if hasattr(self, 'txt_log'): + self.txt_log.append(text) + + def _validate_inputs(self) -> tuple: + hdf5_path = self.txt_hdf5_path.text().strip() if hasattr(self, 'txt_hdf5_path') else '' + toml_path = self.txt_toml_path.text().strip() if hasattr(self, 'txt_toml_path') else '' + base_group = self.txt_base_group.text().strip() if hasattr(self, 'txt_base_group') else 'entry/data/metadata' + include = bool(self.chk_include.isChecked()) if hasattr(self, 'chk_include') else True + in_place = bool(self.chk_in_place.isChecked()) if hasattr(self, 'chk_in_place') else True + + if not toml_path: + QMessageBox.warning(self, 'Missing TOML', 'Please select a TOML mapping file.') + return '', '', '', False, False + if not hdf5_path: + QMessageBox.warning(self, 'Missing Source', 'Please select a HDF5 file or directory.') + return '', '', '', False, False + return hdf5_path, toml_path, base_group, include, in_place + + def _convert(self): + hdf5_path, toml_path, base_group, include, in_place = self._validate_inputs() + if not hdf5_path: + return + + src = Path(hdf5_path) + converted_count = 0 + skipped_count = 0 + errors: List[str] = [] + + try: + if src.is_file(): + # single file + if is_already_formatted(src, base_group): + self._append_log(f"Skip (already formatted): {src}") + skipped_count += 1 + else: + try: + outputs = convert_files_or_dir( + toml_path=toml_path, + hdf5_path=str(src), + base_group=base_group, + include=include, + in_place=in_place, + recursive=False + ) + converted_count += 1 if outputs else 0 + self._append_log(f"Converted: {src}") + except Exception as e: + errors.append(f"{src}: {e}") + self._append_log(f"Error converting {src}: {e}") + elif src.is_dir(): + # directory: recurse + files = list(src.rglob('*.h5')) + if not files: + self._append_log('No .h5 files found in directory.') + for f in files: + if is_already_formatted(f, base_group): + skipped_count += 1 + self._append_log(f"Skip (already formatted): {f}") + continue + try: + outputs = convert_files_or_dir( + toml_path=toml_path, + hdf5_path=str(f), + base_group=base_group, + include=include, + in_place=in_place, + recursive=False + ) + converted_count += 1 if outputs else 0 + self._append_log(f"Converted: {f}") + except Exception as e: + errors.append(f"{f}: {e}") + self._append_log(f"Error converting {f}: {e}") + else: + QMessageBox.critical(self, 'Invalid Path', 'The selected HDF5 path is not a file or directory.') + return + finally: + summary = f"Converted {converted_count} HDF5 file(s)." + if skipped_count: + summary += f" Skipped {skipped_count} already formatted." 
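+            # Errors are reported here only as a count; per-file details were already appended to the log above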
+ if errors: + summary += f" Errors: {len(errors)}" + self._append_log(summary) + QMessageBox.information(self, 'Conversion Summary', summary) + + +def main(): + app = QApplication(sys.argv) + dlg = MetadataConverterDialog() + dlg.show() + app.exec_() + + +if __name__ == '__main__': + main() diff --git a/viewer/views_registry/__init__.py b/viewer/views_registry/__init__.py new file mode 100644 index 0000000..c4e3fa2 --- /dev/null +++ b/viewer/views_registry/__init__.py @@ -0,0 +1,5 @@ +"""Registry package for dynamic view buttons in the Launcher. + +Add new view definitions in registry.py (VIEWS list) to auto-populate +buttons in the launcher. +""" diff --git a/viewer/views_registry/registry.py b/viewer/views_registry/registry.py new file mode 100644 index 0000000..14bc497 --- /dev/null +++ b/viewer/views_registry/registry.py @@ -0,0 +1,13 @@ +import sys + +# Define view entries to be rendered as buttons in the Launcher. +# To add a new view, append another dict with the same keys. +VIEWS = [ + { + 'key': 'scan_monitors', + 'label': 'Scan Monitors', + 'cmd': [sys.executable, 'dashpva.py', 'view', 'scan'], + 'running_text': 'Scan Monitors — Running…', + 'tooltip': 'Open Scan Monitors (CLI: dashpva.py view scan)' + }, +] diff --git a/viewer/workbench/doc/index.html b/viewer/workbench/doc/index.html new file mode 100644 index 0000000..0d2ab75 --- /dev/null +++ b/viewer/workbench/doc/index.html @@ -0,0 +1,66 @@ + + + + + + Workbench Documentation + + + +

+    <h1>Workbench Viewer Documentation</h1>
+    <p>Overview and usage notes for the Workbench.</p>
+
+    <p>The Workbench viewer provides 2D, 3D, and 1D visualization of HDF5 datasets, along with tools for ROI and speckle analysis.</p>
+
+    <h2>Features</h2>
+    <ul>
+      <li>Load single HDF5 files or entire folders</li>
+      <li>Visualize datasets in 2D and 3D (when PyVista is available)</li>
+      <li>ROI creation and statistics</li>
+      <li>Playback of 3D stacks with FPS control</li>
+    </ul>
+
+    <h2>How to open this documentation</h2>
+    <p>Use the menu Documentation → Open Documentation or press F1 within the Workbench window.</p>
+
+    <h2>Directory structure</h2>
+    <pre>
+viewer/
+  workbench/
+    workbench.py
+    doc/
+      index.html   ← you are here
+      README.md    ← optional alternative
+    </pre>
+
+    <h2>2D Viewer</h2>
+
+    <h3>Opening files</h3>
+    <p>Use the file button to open a single file or an entire folder (.h5 files only). Once a file is selected, the selectable datasets are highlighted in blue. Right-click a file to edit it.</p>
+
+    <h3>ROI</h3>
+    <p>Click "Draw ROI" to create as many ROIs as you need. Each ROI appears in a panel docked on the left or right side of the window, and the panel can be re-docked on either side.</p>
+
+    <h3>Player</h3>
+
+    <h2>3D Viewer</h2>
+
+    <p><em>To edit this page, edit viewer/workbench/doc/index.html.</em></p>
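The page above says the documentation opens from Documentation → Open Documentation or with F1. As a rough illustration of how a PyQt5 window could wire that up (the `WorkbenchDocs` class and `open_documentation` method below are hypothetical, not part of this diff):

```python
# Hypothetical sketch: a Documentation menu entry and F1 shortcut that open
# viewer/workbench/doc/index.html in the system browser. Names are illustrative.
from pathlib import Path

from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QDesktopServices, QKeySequence
from PyQt5.QtWidgets import QAction, QMainWindow


class WorkbenchDocs(QMainWindow):
    def __init__(self):
        super().__init__()
        # Documentation -> Open Documentation, also reachable via F1
        open_docs = QAction("Open Documentation", self)
        open_docs.setShortcut(QKeySequence("F1"))
        open_docs.triggered.connect(self.open_documentation)
        self.menuBar().addMenu("Documentation").addAction(open_docs)

    def open_documentation(self):
        # Resolve the bundled doc page and hand it to the default browser;
        # do nothing if the file is missing.
        index = Path("viewer/workbench/doc/index.html").resolve()
        if index.exists():
            QDesktopServices.openUrl(QUrl.fromLocalFile(str(index)))
```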
+ + diff --git a/viewer/workbench/dock_window.py b/viewer/workbench/dock_window.py new file mode 100644 index 0000000..00a661d --- /dev/null +++ b/viewer/workbench/dock_window.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +""" +Dock Window +A lightweight secondary QMainWindow intended to host dockable tools. +Modeless (does not disable the main Workbench window), can be filled +with QDockWidget-based panels like ROI Plot and ROI Math. +""" + +from PyQt5.QtWidgets import QMainWindow, QWidget, QDockWidget, QLabel +from PyQt5.QtCore import Qt + + +class DockWindow(QMainWindow): + """ + Secondary window for hosting dockable tools. + + Attributes: + main (QMainWindow): Reference to the primary Workbench window for callbacks. + """ + + def __init__(self, main_window, title: str = None, width: int = 1000, height: int = 700): + super().__init__(parent=None) # top-level, modeless + self.main = main_window + self.setWindowTitle(title or "Dock Window") + self.resize(width, height) + + # Central placeholder; docks will live around this + central = QWidget(self) + self.setCentralWidget(central) + + # Create an initial empty dock so users can dock panels into this window + try: + self.empty_dock = QDockWidget("Dock", self) + self.empty_dock.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea) + placeholder = QLabel("Empty Dock — you can add dockable panels here", self.empty_dock) + placeholder.setAlignment(Qt.AlignCenter) + self.empty_dock.setWidget(placeholder) + self.addDockWidget(Qt.RightDockWidgetArea, self.empty_dock) + self.empty_dock.show() + except Exception: + # Even if dock creation fails, the window remains usable + pass + + # Ensure deletion on close; Workbench keeps reference list to avoid GC while open + try: + self.setAttribute(Qt.WA_DeleteOnClose, True) + except Exception: + pass + + def show_and_focus(self) -> None: + """Show the window modeless and bring it to the foreground.""" + try: + self.show() + self.raise_() + self.activateWindow() + except Exception: + # Best-effort foregrounding only + self.show() diff --git a/viewer/workbench/docks/base_dock.py b/viewer/workbench/docks/base_dock.py new file mode 100644 index 0000000..ba87478 --- /dev/null +++ b/viewer/workbench/docks/base_dock.py @@ -0,0 +1,23 @@ +from PyQt5.QtWidgets import QDockWidget, QAction +from PyQt5.QtCore import Qt + +class BaseDock(QDockWidget): + def __init__(self, title="", main_window=None, segment_name=None, dock_area=Qt.LeftDockWidgetArea): + super().__init__(title, main_window) + self.title = title + self.main_window = main_window + self.segment_name = (segment_name or "").strip().lower() if segment_name is not None else None + self.dock_area = dock_area + self.setup() + + def setup(self): + # Dock + self.setWindowTitle(self.title) + self.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea) + self.main_window.addDockWidget(self.dock_area, self) + + # Register dock toggle under segmented Windows menu via BaseWindow helper + self.action_window_dock = self.main_window.add_dock_toggle_action( + self, self.title, segment_name=self.segment_name + ) + self.visibilityChanged.connect(lambda visible: self.action_window_dock.setChecked(bool(visible))) diff --git a/viewer/workbench/docks/dash_ai.py b/viewer/workbench/docks/dash_ai.py new file mode 100644 index 0000000..669f260 --- /dev/null +++ b/viewer/workbench/docks/dash_ai.py @@ -0,0 +1,53 @@ +from viewer.workbench.docks.base_dock import BaseDock +from viewer.base_window import BaseWindow +from PyQt5.QtWidgets import QGroupBox, QMessageBox, QLabel, 
QVBoxLayout, QLineEdit, QPushButton +from PyQt5.QtCore import Qt + + +class DashAI(BaseDock): + """ + DashAI dockable window. + """ + + def __init__(self, title="DashAI", main_window: BaseWindow=None, segment_name="2d", dock_area=Qt.RightDockWidgetArea): + # Call BaseDock with segment routing + super().__init__(title, main_window, segment_name=segment_name, dock_area=dock_area) + # Build the dock UI contents + self.build_dock() + + def connect_all(self): + self.btn_segment.clicked.connect(self.run_segmentation) + + def build_dock(self): + self.gb_dash_sam = QGroupBox(self.title) + layout = QVBoxLayout() # You need a layout to hold widgets + + # Segmentation setup + # Use a QLabel for instructions + self.prompt_label = QLabel( + "Instructions:
" + "1. Click on the image to select points.
" + "2. Press 'Segment' to run DashAI.
" + "Add a prompt or message for DashAI to read" + ) + self.prompt_label.setWordWrap(True) + layout.addWidget(self.prompt_label) + + # 2. The Input Box (Where the user types) + self.text_prompt_input = QLineEdit() + self.text_prompt_input.setPlaceholderText("e.g., 'segment the large crystal'...") + layout.addWidget(self.text_prompt_input) + + # 3. Action Button + self.btn_segment = QPushButton("Run DashAI Segmentation") + self.btn_segment.setStyleSheet("background-color: #4CAF50; color: white; font-weight: bold;") + # Connect this button to your SAM function later + # self.btn_segment.clicked.connect(self.run_segmentation) + layout.addWidget(self.btn_segment) + + layout.addStretch() # Keeps everything at the top + self.gb_dash_sam.setLayout(layout) + self.setWidget(self.gb_dash_sam) + + def run_segmentation(self): + print("Running segmentation called will be implemented soon") diff --git a/viewer/workbench/docks/data_structure.py b/viewer/workbench/docks/data_structure.py new file mode 100644 index 0000000..a8cef8e --- /dev/null +++ b/viewer/workbench/docks/data_structure.py @@ -0,0 +1,292 @@ +from viewer.workbench.docks.base_dock import BaseDock +from viewer.base_window import BaseWindow +from viewer.workbench.workers import DatasetLoader +from PyQt5.QtCore import Qt, QThread +from PyQt5.QtWidgets import QAction, QVBoxLayout, QHBoxLayout, QTreeWidget, QGroupBox, QPushButton, QMessageBox +from utils.hdf5_loader import HDF5Loader +import os + +class DataStructureDock(BaseDock): + def __init__(self, title="Data Structure", main_window:BaseWindow=None, segment_name="other", dock_area=Qt.LeftDockWidgetArea): + super().__init__(title, main_window, segment_name=segment_name, dock_area=dock_area) + self.title = title + self.main_window = main_window + # Parent BaseDock.__init__ already performs setup; no need to call again + self.build_dock() + + def setup(self): + try: + # Delegate core docking and Windows menu registration to BaseDock.setup + super().setup() + try: + self.connect() + except Exception: + pass + except Exception as e: + print(e) + pass + + def build_dock(self): + """Build the UI Dock""" + # Create a group box to mirror the Workbench's "Data Structure" panel + self.gb_data_structure = QGroupBox() + self.gb_data_structure.setObjectName("groupBox_dataTree") + + # Create the tree widget that will display the hierarchical dataset + self.tree_data = QTreeWidget() + self.tree_data.setObjectName("tree_data") + self.tree_data.setHeaderHidden(True) + + # Layout the tree inside the group box with a simple Refresh button + layout = QVBoxLayout() + layout.setContentsMargins(8, 8, 8, 8) + header = QHBoxLayout() + self.btn_refresh = QPushButton("Refresh") + try: + self.btn_refresh.setToolTip("Refresh Data Structure") + self.btn_refresh.clicked.connect(self._on_refresh_clicked) + except Exception: + pass + header.addWidget(self.btn_refresh) + # Add Load Dataset button next to Refresh + self.btn_load = QPushButton("Load Dataset") + try: + self.btn_load.setToolTip("Load default dataset for selected file or the selected dataset") + self.btn_load.clicked.connect(self._on_load_clicked) + except Exception: + pass + header.addWidget(self.btn_load) + header.addStretch(1) + layout.addLayout(header) + layout.addWidget(self.tree_data) + self.gb_data_structure.setLayout(layout) + + # Set the group box as the dock's main widget + self.setWidget(self.gb_data_structure) + + def _on_refresh_clicked(self): + try: + # Perform refresh within the dock, using the same functions that populated the tree originally + 
self.refresh_data_structure_display() + except Exception as e: + QMessageBox.critical(self, "Refresh Error", f"Failed to refresh: {e}") + + def _on_load_clicked(self): + try: + tree = getattr(self, 'tree_data', None) + mw = getattr(self, 'main_window', None) + if tree is None or mw is None: + QMessageBox.information(self, "Load Dataset", "Tree or main window not available.") + return + item = tree.currentItem() + if item is None: + # If no selection, try current file + fp = getattr(mw, 'current_file_path', None) + if fp and os.path.exists(fp): + self._load_main_data_for_path(fp) + else: + QMessageBox.information(self, "Load Dataset", "No selection or current file.") + return + item_type = item.data(0, Qt.UserRole + 2) + if item_type == "file_root": + path = item.data(0, Qt.UserRole + 1) + if path and os.path.exists(path): + self._load_main_data_for_path(path) + else: + QMessageBox.information(self, "Load Dataset", "Selected file is not available.") + return + # If a dataset/group is selected, try to load that selection; otherwise fall back to default + full_path = item.data(0, 32) # Qt.UserRole = 32 + if full_path: + self._ensure_current_file_from_item(item) + try: + mw.selected_dataset_path = full_path + except Exception: + pass + mw.start_dataset_load() + else: + fp = getattr(mw, 'current_file_path', None) + if fp and os.path.exists(fp): + self._load_main_data_for_path(fp) + else: + QMessageBox.information(self, "Load Dataset", "No dataset path found.") + except Exception as e: + QMessageBox.critical(self, "Load Error", f"Failed to load dataset: {e}") + + def _ensure_current_file_from_item(self, item): + try: + mw = getattr(self, 'main_window', None) + cur = item + while cur is not None: + t = cur.data(0, Qt.UserRole + 2) + if t == "file_root": + fp = cur.data(0, Qt.UserRole + 1) + if fp and mw is not None: + mw.current_file_path = fp + break + cur = cur.parent() + except Exception: + pass + + def connect(self): + try: + if hasattr(self, 'tree_data') and self.tree_data is not None: + try: + self.tree_data.itemClicked.connect(self._on_tree_item_clicked) + except Exception: + pass + except Exception: + pass + + def _on_tree_item_clicked(self, item, column): + try: + # If a file root is clicked, load its default main dataset into the active workspace + item_type = item.data(0, Qt.UserRole + 2) + mw = getattr(self, 'main_window', None) + if item_type == "file_root": + path = item.data(0, Qt.UserRole + 1) + if path and os.path.exists(path): + self._load_main_data_for_path(path) + else: + # If a dataset node is clicked, load that dataset into the active workspace + full_path = item.data(0, 32) + if full_path and mw is not None: + # Ensure the main window knows the current file + cur = item + while cur is not None: + t = cur.data(0, Qt.UserRole + 2) + if t == "file_root": + fp = cur.data(0, Qt.UserRole + 1) + if fp: + mw.current_file_path = fp + break + cur = cur.parent() + try: + mw.selected_dataset_path = full_path + except Exception: + pass + mw.start_dataset_load() + except Exception: + pass + + def refresh_data_structure_display(self, file_path=None): + """Refresh the data tree by reloading currently listed top-level items (files and folder sections). + This uses the main window's existing load functions to ensure identical population behavior. 
+ If a specific file_path is provided, it will attempt to refresh only that entry; otherwise, refreshes all.""" + try: + tree = getattr(self, 'tree_data', None) + if tree is None: + QMessageBox.information(self, "Refresh", "Data tree is not available.") + return + mw = getattr(self, 'main_window', None) + if mw is None: + QMessageBox.information(self, "Refresh", "Main window is not available.") + return + + # Snapshot existing top-level items and their paths/types + snapshot = [] + try: + if file_path: + # If a specific path was requested, try to locate it among top-level items + for i in range(tree.topLevelItemCount()): + item = tree.topLevelItem(i) + path = item.data(0, Qt.UserRole + 1) + if path == file_path: + item_type = item.data(0, Qt.UserRole + 2) + snapshot.append((item_type, path)) + break + else: + for i in range(tree.topLevelItemCount()): + item = tree.topLevelItem(i) + item_type = item.data(0, Qt.UserRole + 2) + path = item.data(0, Qt.UserRole + 1) + snapshot.append((item_type, path)) + except Exception: + snapshot = [] + + # Clear the tree before repopulating + try: + tree.clear() + except Exception: + pass + + # Rebuild using the same loading functions used initially + rebuilt_any = False + for item_type, path in snapshot: + if not path: + continue + try: + if item_type == "file_root" and os.path.exists(path) and hasattr(mw, 'load_single_h5_file'): + mw.load_single_h5_file(path) + rebuilt_any = True + elif item_type == "folder_section" and os.path.isdir(path) and hasattr(mw, 'load_folder_content'): + mw.load_folder_content(path) + rebuilt_any = True + except Exception as e: + print(f"[DataStructureDock] Refresh failed for {path}: {e}") + + # Fallback: if nothing was rebuilt, try the current file + if not rebuilt_any: + fp = getattr(mw, 'current_file_path', None) + try: + if fp and os.path.exists(fp) and hasattr(mw, 'load_single_h5_file'): + mw.load_single_h5_file(fp) + rebuilt_any = True + except Exception: + pass + + if rebuilt_any: + # Suppress popup per user request - update status silently if available + if hasattr(self, 'update_status'): + try: + self.update_status("Data structure refreshed.") + except Exception: + pass + # If a dataset is selected, start background load to keep UI responsive + try: + if mw is not None and getattr(mw, 'selected_dataset_path', None): + mw.start_dataset_load() + except Exception: + pass + else: + # Suppress popup per user request - update status silently if available + if hasattr(self, 'update_status'): + try: + self.update_status("Nothing to refresh.") + except Exception: + pass + # If a dataset is selected, start background load to keep UI responsive + try: + if mw is not None and getattr(mw, 'selected_dataset_path', None): + mw.start_dataset_load() + except Exception: + pass + except Exception as e: + QMessageBox.critical(self, "Refresh Error", f"Failed to refresh: {e}") + + def _load_data_structure(self): + try: + folder_name = os.path.basename(self.current_file_path) + except Exception as e: + QMessageBox.critical(self, "Error", f"Failed to load folder: {str(e)}") + self.update_status("Failed to load folder") + + def _populate_tree_recursive(self): + pass + + def _start_dataset_load(self): + """Create a worker thread to load dataset without blocking the UI.""" + try: + self.update_status(f"Loading dataset: {self.selected_dataset_path}") + self._dataset_thread = QThread() + self._dataset_worker = DatasetLoader(self.current_file_path, self.selected_dataset_path) + self._dataset_worker.moveToThread(self._dataset_thread) + 
self._dataset_thread.started.connect(self._dataset_worker.run) + self._dataset_worker.loaded.connect(self.on_dataset_loaded) + self._dataset_worker.failed.connect(self.on_dataset_failed) + # Ensure thread quits after work + self._dataset_worker.loaded.connect(self._dataset_thread.quit) + self._dataset_worker.failed.connect(self._dataset_thread.quit) + self._dataset_thread.start() + except Exception as e: + self.update_status(f"Error starting dataset load: {e}") diff --git a/viewer/workbench/docks/info_2d_dock.py b/viewer/workbench/docks/info_2d_dock.py new file mode 100644 index 0000000..16b89c4 --- /dev/null +++ b/viewer/workbench/docks/info_2d_dock.py @@ -0,0 +1,118 @@ +from typing import Optional +import numpy as np +from PyQt5.QtCore import Qt + +from viewer.workbench.docks.information_dock_base import InformationDockBase + + +class Info2DDock(InformationDockBase): + """Information dock specialized for 2D viewing state. + + Shows number of points in the current frame and X/Y axis variable labels. + """ + + def __init__( + self, + main_window=None, + title: str = "2D Info", + segment_name: Optional[str] = "2d", + dock_area: Qt.DockWidgetArea = Qt.RightDockWidgetArea, + ): + super().__init__(title=title, main_window=main_window, segment_name=segment_name, dock_area=dock_area) + + def refresh(self) -> None: + """Refresh the displayed information based on the main window's 2D state.""" + mw = getattr(self, 'main_window', None) + if mw is None: + return + # Try to keep mouse info consistent when refresh occurs + try: + xy = getattr(mw, '_last_hover_xy', None) + frame = mw.get_current_frame_data() if hasattr(mw, 'get_current_frame_data') else None + intensity = None + H_val = K_val = L_val = None + pos = None + # Populate Mouse HKL even if no hover yet by falling back to center pixel + if frame is not None and frame.ndim == 2: + h, w = frame.shape + # Validate hover position + if xy is not None: + try: + x_hover, y_hover = int(xy[0]), int(xy[1]) + if 0 <= x_hover < w and 0 <= y_hover < h: + x, y = x_hover, y_hover + pos = (x, y) + else: + x, y = w // 2, h // 2 + pos = (x, y) + except Exception: + x, y = w // 2, h // 2 + pos = (x, y) + else: + x, y = w // 2, h // 2 + pos = (x, y) + # Intensity at chosen position + try: + intensity = float(frame[y, x]) + except Exception: + intensity = None + # HKL from cached q-grids if present + try: + qxg = getattr(mw, '_qx_grid', None) + qyg = getattr(mw, '_qy_grid', None) + qzg = getattr(mw, '_qz_grid', None) + if qxg is not None and qyg is not None and qzg is not None: + if qxg.ndim == 3: + idx = int(mw.frame_spinbox.value()) if hasattr(mw, 'frame_spinbox') and mw.frame_spinbox.isEnabled() else 0 + if 0 <= idx < qxg.shape[0]: + H_val = float(qxg[idx, y, x]); K_val = float(qyg[idx, y, x]); L_val = float(qzg[idx, y, x]) + elif qxg.ndim == 2: + H_val = float(qxg[y, x]); K_val = float(qyg[y, x]); L_val = float(qzg[y, x]) + except Exception: + H_val = K_val = L_val = None + # Update Mouse section in the dock + self.set_mouse_info(pos, intensity, H_val, K_val, L_val) + except Exception: + pass + # Points: show total points across data dimensions (include product, e.g., FxHxW = N) + points_str = None + low_val = None + high_val = None + try: + data = getattr(mw, 'current_2d_data', None) + if isinstance(data, np.ndarray): + # total points + total = int(data.size) + points_str = f"{total:,}" + # intensity low/high across dataset + try: + low_val = float(np.min(data)) + except Exception: + low_val = None + try: + high_val = float(np.max(data)) + except Exception: 
+ high_val = None + except Exception: + points_str = None + self.set_points(points_str) + try: + self.set_intensity(low_val, high_val) + except Exception: + pass + # Axes: from WorkbenchWindow axis variables; annotate default source axes + try: + xlab = getattr(mw, 'axis_2d_x', None) + ylab = getattr(mw, 'axis_2d_y', None) + dx = xlab + dy = ylab + try: + if isinstance(xlab, str) and xlab.strip().lower() in ("columns", "column"): + dx = f"{xlab}(Source)" + if isinstance(ylab, str) and ylab.strip().lower() in ("row", "rows"): + dy = f"{ylab}(Source)" + except Exception: + pass + except Exception: + dx = None; dy = None + self.set_axes(dx, dy) diff --git a/viewer/workbench/docks/info_3d_dock.py b/viewer/workbench/docks/info_3d_dock.py new file mode 100644 index 0000000..090f5ec --- /dev/null +++ b/viewer/workbench/docks/info_3d_dock.py @@ -0,0 +1,246 @@ +from typing import Optional, Tuple +import numpy as np + +from PyQt5.QtCore import Qt +from PyQt5.QtWidgets import QLabel, QFormLayout + +from viewer.workbench.docks.information_dock_base import InformationDockBase + + +class Info3DDock(InformationDockBase): + """Information dock specialized for 3D slice state (HKL only). + + Programmatically augments the base InformationDock UI with 3D-specific rows: + - Orientation (HK, KL, HL, or Custom) + - Slice position (orthogonal HKL axis/value; e.g., L = 1.23456, or n·origin = value) + - Origin (H,K,L) with 5 decimals + - Normal (H,K,L) with 5 decimals + - H/K/L ranges across current slice points (min..max) + - Image size (HxW) for rasterization target + + Reuses base fields: Total Points, Intensity Low/High. + """ + + def __init__( + self, + main_window=None, + title: str = "3D Info", + segment_name: Optional[str] = "3d", + dock_area: Qt.DockWidgetArea = Qt.RightDockWidgetArea, + ): + super().__init__(title=title, main_window=main_window, segment_name=segment_name, dock_area=dock_area) + self._setup_extra_rows() + + # UI augmentation + def _setup_extra_rows(self) -> None: + try: + form: QFormLayout = self._widget.findChild(QFormLayout, "formLayout") + if form is None: + return + # Helper to add a row and keep refs + def add_row(caption: str, obj_name: str) -> QLabel: + cap = QLabel(caption, self._widget) + val = QLabel("—", self._widget) + val.setObjectName(obj_name) + form.addRow(cap, val) + return val + + # Orientation + self.lbl_orientation = add_row("Orientation:", "lbl_orientation") + self.lbl_orientation.setToolTip("Slice plane orientation in HKL coordinates") + # Slice position + self.lbl_slice_pos = add_row("Slice Position:", "lbl_slice_pos") + self.lbl_slice_pos.setToolTip("Orthogonal axis value for axis-aligned planes, or n·origin for custom") + # Origin + self.lbl_origin = add_row("Origin (H,K,L):", "lbl_origin") + self.lbl_origin.setToolTip("Slice plane origin in HKL coordinates") + # Normal + self.lbl_normal = add_row("Normal (H,K,L):", "lbl_normal") + self.lbl_normal.setToolTip("Slice plane normal in HKL coordinates") + # Ranges + self.lbl_H_range = add_row("H range:", "lbl_H_range") + self.lbl_K_range = add_row("K range:", "lbl_K_range") + self.lbl_L_range = add_row("L range:", "lbl_L_range") + self.lbl_H_range.setToolTip("Min..Max over slice points H component") + self.lbl_K_range.setToolTip("Min..Max over slice points K component") + self.lbl_L_range.setToolTip("Min..Max over slice points L component") + # Image size + self.lbl_image_size = add_row("Image size:", "lbl_image_size") + self.lbl_image_size.setToolTip("Rasterization target size (HxW)") + except Exception: + pass + + # 
Public API + def update_from_slice( + self, + slice_mesh, + normal: np.ndarray, + origin: np.ndarray, + target_shape: Optional[Tuple[int, int]] = None, + ) -> None: + """Update all labels based on a PyVista slice mesh and plane definition. + - HKL-only computations (no U/V). + - Intensity low/high from slice_mesh['intensity'] if present. + - Image size prefers provided target_shape; falls back to 512×512. + """ + try: + # Points + try: + npts = int(getattr(slice_mesh, 'n_points', 0)) + except Exception: + npts = None + self.set_points(npts) + + # Intensities + low = high = None + try: + vals = np.asarray(slice_mesh["intensity"], dtype=float).ravel() + if vals.size > 0: + low = float(np.min(vals)) + high = float(np.max(vals)) + except Exception: + pass + self.set_intensity(low, high) + + # Origin, Normal formatting (5 decimals) + def fmt5(x: float) -> str: + try: + return f"{float(x):.5f}" + except Exception: + return str(x) + + try: + o = np.array(origin, dtype=float).reshape(3) + except Exception: + o = np.array([np.nan, np.nan, np.nan], dtype=float) + try: + n = np.array(normal, dtype=float).reshape(3) + except Exception: + n = np.array([0.0, 0.0, 1.0], dtype=float) + + try: + self.lbl_origin.setText(f"({fmt5(o[0])}, {fmt5(o[1])}, {fmt5(o[2])})") + except Exception: + pass + try: + self.lbl_normal.setText(f"({fmt5(n[0])}, {fmt5(n[1])}, {fmt5(n[2])})") + except Exception: + pass + + # Orientation and orthogonal axis + orientation, uv_idxs, orth_label = self._infer_orientation_and_axes(n) + try: + orient_txt = orientation if orientation == "Custom" else f"{orientation} plane" + self.lbl_orientation.setText(orient_txt) + except Exception: + pass + + # Slice position + try: + orth_val = None + if orth_label == "L": + orth_val = float(o[2]) + elif orth_label == "H": + orth_val = float(o[0]) + elif orth_label == "K": + orth_val = float(o[1]) + if orth_val is not None: + self.lbl_slice_pos.setText(f"{orth_label} = {fmt5(orth_val)}") + else: + # Custom: n·origin + try: + n_unit = n / (np.linalg.norm(n) or 1.0) + except Exception: + n_unit = n + try: + val = float(np.dot(n_unit, o)) + except Exception: + val = float('nan') + self.lbl_slice_pos.setText(f"n·origin = {fmt5(val)}") + except Exception: + pass + + # Ranges over slice points + try: + pts = np.asarray(getattr(slice_mesh, 'points', np.empty((0, 3))), dtype=float) + except Exception: + pts = np.empty((0, 3), dtype=float) + def fmt_range(arr: np.ndarray) -> str: + if arr.size == 0: + return "—" + try: + amin = float(np.min(arr)) + amax = float(np.max(arr)) + if not np.isfinite(amin) or not np.isfinite(amax): + return "—" + return f"{amin:.6g}..{amax:.6g}" + except Exception: + return "—" + try: + self.lbl_H_range.setText(fmt_range(pts[:, 0] if pts.shape[1] >= 1 else np.array([]))) + self.lbl_K_range.setText(fmt_range(pts[:, 1] if pts.shape[1] >= 2 else np.array([]))) + self.lbl_L_range.setText(fmt_range(pts[:, 2] if pts.shape[1] >= 3 else np.array([]))) + except Exception: + pass + + # Image size (HxW) + try: + if not target_shape or not isinstance(target_shape, (tuple, list)) or len(target_shape) != 2: + target_shape = (0, 0) + try: + H, W = int(target_shape[0]), int(target_shape[1]) + except Exception: + H, W = 0, 0 + self.lbl_image_size.setText(f"HxW = {H}×{W}") + except Exception: + pass + + # Optional: set base axes to match orientation + try: + if orientation == "HK": + self.set_axes("H", "K") + elif orientation == "KL": + self.set_axes("K", "L") + elif orientation == "HL": + self.set_axes("H", "L") + else: + self.set_axes("U", "V") + 
except Exception: + pass + except Exception: + # Keep errors contained + pass + + # Logic reuse (HKL only) + def _infer_orientation_and_axes(self, normal: np.ndarray) -> Tuple[str, Optional[Tuple[int, int]], Optional[str]]: + """Infer slice orientation from the plane normal in HKL coordinates. + Returns (orientation, (u_idx, v_idx) for axis-aligned mapping or None, orth_label). + orientation in {'HK','KL','HL','Custom'}; u_idx/v_idx map to columns of pts (0:H, 1:K, 2:L). + orth_label is the axis perpendicular to the plane ('L' for HK, 'H' for KL, 'K' for HL). + """ + try: + n = np.array(normal, dtype=float).reshape(3) + n_norm = float(np.linalg.norm(n)) + if not np.isfinite(n_norm) or n_norm <= 0.0: + n = np.array([0.0, 0.0, 1.0], dtype=float) + else: + n = n / n_norm + X = np.array([1.0, 0.0, 0.0], dtype=float) # H + Y = np.array([0.0, 1.0, 0.0], dtype=float) # K + Z = np.array([0.0, 0.0, 1.0], dtype=float) # L + tol = 0.95 + dX = abs(float(np.dot(n, X))) + dY = abs(float(np.dot(n, Y))) + dZ = abs(float(np.dot(n, Z))) + if dZ >= tol: + # Normal ~ L → HK plane + return "HK", (0, 1), "L" + if dX >= tol: + # Normal ~ H → KL plane + return "KL", (1, 2), "H" + if dY >= tol: + # Normal ~ K → HL plane + return "HL", (0, 2), "K" + return "Custom", None, None + except Exception: + return "Custom", None, None diff --git a/viewer/workbench/docks/info_panel.py b/viewer/workbench/docks/info_panel.py new file mode 100644 index 0000000..b9056ee --- /dev/null +++ b/viewer/workbench/docks/info_panel.py @@ -0,0 +1,5 @@ +from viewer.workbench.docks.base_dock import BaseDock +from viewer.base_window import BaseWindow + +class DataInformation(BaseDock): + pass \ No newline at end of file diff --git a/viewer/workbench/docks/information_dock_base.py b/viewer/workbench/docks/information_dock_base.py new file mode 100644 index 0000000..81b40f3 --- /dev/null +++ b/viewer/workbench/docks/information_dock_base.py @@ -0,0 +1,153 @@ +from pathlib import Path +from typing import Optional + +from PyQt5 import uic +from PyQt5.QtWidgets import QWidget, QLabel +from PyQt5.QtCore import Qt + +from viewer.workbench.docks.base_dock import BaseDock + + +class InformationDockBase(BaseDock): + """Base information dock that loads a .ui and provides simple setters. + + This dock is UI-driven via gui/workbench/docks/information_dock.ui + and exposes helpers to set points count and axis labels. It can be + reused by dimension-specific subclasses (e.g., 2D, 3D). 
+ """ + + def __init__( + self, + title: str = "Information", + main_window=None, + segment_name: Optional[str] = None, + dock_area: Qt.DockWidgetArea = Qt.RightDockWidgetArea, + ): + # BaseDock will perform docking and Windows-menu registration + super().__init__(title=title, main_window=main_window, segment_name=segment_name, dock_area=dock_area) + + # Load the UI into a QWidget and set as the dock widget + project_root = Path(__file__).resolve().parents[3] + ui_path = project_root / "gui" / "workbench" / "docks" / "information_dock.ui" + self._widget = QWidget(self) + try: + uic.loadUi(str(ui_path), self._widget) + except Exception as e: + # Fallback: create a minimal widget if UI load fails + self._widget = QWidget(self) + print(f"[InformationDockBase] Failed to load UI: {e}") + self.setWidget(self._widget) + + # Cache label refs for fast updates + try: + self.lbl_points: QLabel = self._widget.findChild(QLabel, "lbl_points") + self.lbl_axis_x: QLabel = self._widget.findChild(QLabel, "lbl_axis_x") + self.lbl_axis_y: QLabel = self._widget.findChild(QLabel, "lbl_axis_y") + except Exception: + self.lbl_points = None + self.lbl_axis_x = None + self.lbl_axis_y = None + # Intensity labels + try: + self.lbl_int_low: QLabel = self._widget.findChild(QLabel, "lbl_int_low") + self.lbl_int_high: QLabel = self._widget.findChild(QLabel, "lbl_int_high") + except Exception: + self.lbl_int_low = None + self.lbl_int_high = None + # Mouse hover labels + try: + self.lbl_mouse_pos: QLabel = self._widget.findChild(QLabel, "lbl_mouse_pos") + self.lbl_mouse_int: QLabel = self._widget.findChild(QLabel, "lbl_mouse_int") + self.lbl_mouse_H: QLabel = self._widget.findChild(QLabel, "lbl_mouse_H") + self.lbl_mouse_K: QLabel = self._widget.findChild(QLabel, "lbl_mouse_K") + self.lbl_mouse_L: QLabel = self._widget.findChild(QLabel, "lbl_mouse_L") + except Exception: + self.lbl_mouse_pos = None + self.lbl_mouse_int = None + self.lbl_mouse_H = None + self.lbl_mouse_K = None + self.lbl_mouse_L = None + + # Helper setters + def set_points(self, count: Optional[int]) -> None: + try: + if isinstance(count, int): + txt = f"{count:,}" + elif count is None: + txt = "—" + else: + try: + txt = f"{int(count):,}" + except Exception: + txt = str(count) + if self.lbl_points is not None: + self.lbl_points.setText(txt) + except Exception: + pass + + def set_axes(self, x_label: Optional[str], y_label: Optional[str]) -> None: + try: + if self.lbl_axis_x is not None: + self.lbl_axis_x.setText(str(x_label) if x_label else "—") + if self.lbl_axis_y is not None: + self.lbl_axis_y.setText(str(y_label) if y_label else "—") + except Exception: + pass + + def set_intensity(self, low: Optional[float], high: Optional[float]) -> None: + try: + def fmt(val): + if val is None: + return "—" + try: + return f"{float(val):.6g}" + except Exception: + return str(val) + if self.lbl_int_low is not None: + self.lbl_int_low.setText(fmt(low)) + if self.lbl_int_high is not None: + self.lbl_int_high.setText(fmt(high)) + except Exception: + pass + + def set_mouse_info(self, pos: Optional[tuple], intensity: Optional[float], H: Optional[float], K: Optional[float], L: Optional[float]) -> None: + """Update the Mouse section with position, intensity, and HKL values, with HKL colors.""" + try: + def fmtf(val): + if val is None: + return "—" + try: + return f"{float(val):.6g}" + except Exception: + return str(val) + def fmtpos(p): + if not p or len(p) < 2: + return "—" + try: + return f"({int(p[0])}, {int(p[1])})" + except Exception: + return str(p) + if getattr(self, 
'lbl_mouse_pos', None) is not None: + self.lbl_mouse_pos.setText(fmtpos(pos)) + if getattr(self, 'lbl_mouse_int', None) is not None: + self.lbl_mouse_int.setText(fmtf(intensity)) + if getattr(self, 'lbl_mouse_H', None) is not None: + self.lbl_mouse_H.setText(fmtf(H)) + try: + self.lbl_mouse_H.setStyleSheet("color: red;") + except Exception: + pass + if getattr(self, 'lbl_mouse_K', None) is not None: + self.lbl_mouse_K.setText(fmtf(K)) + try: + self.lbl_mouse_K.setStyleSheet("color: green;") + except Exception: + pass + if getattr(self, 'lbl_mouse_L', None) is not None: + self.lbl_mouse_L.setText(fmtf(L)) + try: + self.lbl_mouse_L.setStyleSheet("color: blue;") + except Exception: + pass + except Exception: + pass diff --git a/viewer/workbench/docks/slice_plane.py b/viewer/workbench/docks/slice_plane.py new file mode 100644 index 0000000..1123893 --- /dev/null +++ b/viewer/workbench/docks/slice_plane.py @@ -0,0 +1,149 @@ +from PyQt5 import uic +from PyQt5.QtCore import Qt +from PyQt5.QtWidgets import QWidget + +from viewer.workbench.docks.base_dock import BaseDock + + +class SlicePlaneDock(BaseDock): + """ + Slice Controls dock for manipulating the 3D slice plane and camera. + Loads its UI from gui/workbench/docks/slice_plane.ui and wires signals + into Workspace3D (tab_3d) methods. + """ + def __init__(self, title: str = "Slice Controls", main_window=None, segment_name: str = "3d", dock_area: Qt.DockWidgetArea = Qt.LeftDockWidgetArea): + super().__init__(title=title, main_window=main_window, segment_name=segment_name, dock_area=dock_area) + self._widget = None + self._build() + self._wire() + + def setup(self): + # BaseDock handles docking and Windows->segment toggle registration + super().setup() + + def _build(self): + try: + self._widget = QWidget(self) + uic.loadUi('gui/workbench/docks/slice_plane.ui', self._widget) + self.setWidget(self._widget) + except Exception as e: + # If UI fails to load, keep an empty widget to avoid crashing + self._widget = QWidget(self) + self.setWidget(self._widget) + try: + if hasattr(self.main_window, 'update_status'): + self.main_window.update_status(f"SlicePlaneDock UI load failed: {e}") + except Exception: + pass + + def _wire(self): + mw = self.main_window + if mw is None: + return + tab = getattr(mw, 'tab_3d', None) + if tab is None: + return + w = self._widget + try: + # Steps + if hasattr(w, 'sb_slice_translate_step'): + w.sb_slice_translate_step.setValue(0.01) + w.sb_slice_translate_step.valueChanged.connect(lambda v: setattr(tab, '_slice_translate_step', float(v))) + if hasattr(w, 'sb_slice_rotate_step_deg'): + w.sb_slice_rotate_step_deg.setValue(1.0) + w.sb_slice_rotate_step_deg.valueChanged.connect(lambda v: setattr(tab, '_slice_rotate_step_deg', float(v))) + + # Orientation preset + if hasattr(w, 'cb_slice_orientation'): + w.cb_slice_orientation.currentTextChanged.connect(lambda txt: tab.set_plane_preset(str(txt))) + + # Custom normal spinboxes + def _apply_custom_normal(): + try: + h = float(w.sb_norm_h.value()) if hasattr(w, 'sb_norm_h') else 0.0 + k = float(w.sb_norm_k.value()) if hasattr(w, 'sb_norm_k') else 0.0 + l = float(w.sb_norm_l.value()) if hasattr(w, 'sb_norm_l') else 1.0 + if hasattr(tab, 'set_custom_normal'): + tab.set_custom_normal([h, k, l]) + # If Custom preset selected, apply immediately + cur = str(w.cb_slice_orientation.currentText()) if hasattr(w, 'cb_slice_orientation') else '' + if cur.lower().startswith('custom'): + tab.set_plane_preset('Custom') + except Exception: + pass + for name in ('sb_norm_h', 'sb_norm_k', 
'sb_norm_l'): + spin = getattr(w, name, None) + if spin is not None: + try: + spin.editingFinished.connect(_apply_custom_normal) + except Exception: + pass + + # Translate buttons + if hasattr(w, 'btn_up_normal'): + w.btn_up_normal.clicked.connect(lambda: tab.nudge_along_normal(+1)) + if hasattr(w, 'btn_down_normal'): + w.btn_down_normal.clicked.connect(lambda: tab.nudge_along_normal(-1)) + if hasattr(w, 'btn_pos_h'): + w.btn_pos_h.clicked.connect(lambda: tab.nudge_along_axis('H', +1)) + if hasattr(w, 'btn_neg_h'): + w.btn_neg_h.clicked.connect(lambda: tab.nudge_along_axis('H', -1)) + if hasattr(w, 'btn_pos_k'): + w.btn_pos_k.clicked.connect(lambda: tab.nudge_along_axis('K', +1)) + if hasattr(w, 'btn_neg_k'): + w.btn_neg_k.clicked.connect(lambda: tab.nudge_along_axis('K', -1)) + if hasattr(w, 'btn_pos_l'): + w.btn_pos_l.clicked.connect(lambda: tab.nudge_along_axis('L', +1)) + if hasattr(w, 'btn_neg_l'): + w.btn_neg_l.clicked.connect(lambda: tab.nudge_along_axis('L', -1)) + + # Rotate buttons use current rotate-step from tab + if hasattr(w, 'btn_rot_plus_h'): + w.btn_rot_plus_h.clicked.connect(lambda: tab.rotate_about_axis('H', +getattr(tab, '_slice_rotate_step_deg', 1.0))) + if hasattr(w, 'btn_rot_minus_h'): + w.btn_rot_minus_h.clicked.connect(lambda: tab.rotate_about_axis('H', -getattr(tab, '_slice_rotate_step_deg', 1.0))) + if hasattr(w, 'btn_rot_plus_k'): + w.btn_rot_plus_k.clicked.connect(lambda: tab.rotate_about_axis('K', +getattr(tab, '_slice_rotate_step_deg', 1.0))) + if hasattr(w, 'btn_rot_minus_k'): + w.btn_rot_minus_k.clicked.connect(lambda: tab.rotate_about_axis('K', -getattr(tab, '_slice_rotate_step_deg', 1.0))) + if hasattr(w, 'btn_rot_plus_l'): + w.btn_rot_plus_l.clicked.connect(lambda: tab.rotate_about_axis('L', +getattr(tab, '_slice_rotate_step_deg', 1.0))) + if hasattr(w, 'btn_rot_minus_l'): + w.btn_rot_minus_l.clicked.connect(lambda: tab.rotate_about_axis('L', -getattr(tab, '_slice_rotate_step_deg', 1.0))) + + # Reset + if hasattr(w, 'btn_reset_slice'): + w.btn_reset_slice.clicked.connect(tab.reset_slice) + + # Visibility + if hasattr(w, 'cb_show_slice'): + w.cb_show_slice.toggled.connect(lambda checked: tab.toggle_3d_slice(bool(checked))) + # Show Points (main cloud) + if hasattr(w, 'cb_show_points'): + w.cb_show_points.toggled.connect(lambda checked: tab.toggle_3d_points(bool(checked))) + + # Camera + if hasattr(w, 'cb_cam_preset'): + w.cb_cam_preset.currentTextChanged.connect(lambda txt: tab.set_camera_position(str(txt))) + if hasattr(w, 'btn_zoom_in'): + w.btn_zoom_in.clicked.connect(tab.zoom_in) + if hasattr(w, 'btn_zoom_out'): + w.btn_zoom_out.clicked.connect(tab.zoom_out) + if hasattr(w, 'btn_reset_camera'): + w.btn_reset_camera.clicked.connect(tab.reset_camera) + if hasattr(w, 'btn_view_slice_normal'): + w.btn_view_slice_normal.clicked.connect(tab.view_slice_normal) + + # Initialize defaults + try: + if hasattr(w, 'cb_slice_orientation'): + w.cb_slice_orientation.setCurrentText('HK (xy)') + if hasattr(w, 'cb_show_slice'): + # Mirror current Tab state if available by checking actor existence + w.cb_show_slice.setChecked(True) + if hasattr(w, 'cb_show_points'): + w.cb_show_points.setChecked(True) + except Exception: + pass + except Exception: + pass diff --git a/viewer/workbench/hkl_3d_plot_dock.py b/viewer/workbench/hkl_3d_plot_dock.py new file mode 100644 index 0000000..9dd034d --- /dev/null +++ b/viewer/workbench/hkl_3d_plot_dock.py @@ -0,0 +1,286 @@ +#!/usr/bin/env python3 +""" +HKL 3D Plot Dock for Workbench (Minimal) + +- No interpolation from points to volume 
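
`nudge_along_normal` and `rotate_about_axis` live in `Workspace3D`, which is not part of this diff. A plausible sketch of the geometry those calls imply, assuming the slice plane is stored as an anchor point plus a unit normal (Rodrigues' rotation formula for the tilt):

```python
import numpy as np

def nudge_along_normal(point, normal, step, sign=+1):
    """Translate the slice-plane anchor point along its unit normal by sign*step."""
    n = np.asarray(normal, dtype=float)
    n /= np.linalg.norm(n)
    return np.asarray(point, dtype=float) + sign * step * n

def rotate_about_axis(normal, axis, angle_deg):
    """Rotate the plane normal about a unit axis by angle_deg (Rodrigues' formula)."""
    k = np.asarray(axis, dtype=float)
    k /= np.linalg.norm(k)
    v = np.asarray(normal, dtype=float)
    t = np.radians(angle_deg)
    return v * np.cos(t) + np.cross(k, v) * np.sin(t) + k * np.dot(k, v) * (1.0 - np.cos(t))

# Nudge an HK (xy) plane 0.01 up along L, then tilt its normal 1 degree about H.
point = nudge_along_normal([0.0, 0.0, 0.0], [0.0, 0.0, 1.0], step=0.01)
normal = rotate_about_axis([0.0, 0.0, 1.0], [1.0, 0.0, 0.0], angle_deg=1.0)
```
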
+- No slice plane or extra controls +- Just plotting when 'Load Dataset' is clicked +- Supports HDF5 (points or volume) and VTI volumes +""" +from PyQt5.QtWidgets import QDockWidget, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QMessageBox, QLabel +from PyQt5.QtCore import Qt +import numpy as np + +try: + import pyvista as pyv + from pyvistaqt import QtInteractor + PYVISTA_AVAILABLE = True +except Exception: + PYVISTA_AVAILABLE = False + +# Import HDF5Loader +import sys as _sys, pathlib as _pathlib +_sys.path.append(str(_pathlib.Path(__file__).resolve().parents[2])) +from utils.hdf5_loader import HDF5Loader + +class HKL3DPlotDock(QDockWidget): + def __init__(self, parent, title: str, main_window): + super().__init__(title, parent) + self.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea) + self.main = main_window + + container = QWidget(self) + layout = QVBoxLayout(container) + layout.setContentsMargins(6, 6, 6, 6) + layout.setSpacing(6) + + # Top row: Load button + top = QHBoxLayout() + self.btn_load = QPushButton("Load Dataset") + self.btn_load.clicked.connect(self.load_data) + top.addWidget(self.btn_load) + layout.addLayout(top) + + + # Plotter + self.plotter = None + if PYVISTA_AVAILABLE: + try: + pyv.set_plot_theme('dark') + except Exception: + pass + try: + self.plotter = QtInteractor(container) + try: + self.plotter.add_axes(xlabel='H', ylabel='K', zlabel='L') + except Exception: + pass + layout.addWidget(self.plotter) + except Exception: + self.plotter = None + self.btn_load.setEnabled(False) + msg = QLabel("3D (VTK) unavailable in tunnel mode.") + try: + msg.setAlignment(Qt.AlignCenter) + except Exception: + pass + try: + msg.setWordWrap(True) + except Exception: + pass + layout.addWidget(msg) + else: + # Fallback: button disabled and message + self.btn_load.setEnabled(False) + try: + QMessageBox.warning(self, "3D Viewer", "PyVista not available. 
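
The module-level `PYVISTA_AVAILABLE` flag is a standard optional-dependency guard: import once, catch broadly (VTK can fail for reasons other than `ImportError`, e.g. a missing display in a tunneled session), and let every later code path branch on the flag. Distilled to a sketch:

```python
# Optional-dependency guard: callers test the flag, never re-import.
try:
    import pyvista as pyv                 # heavy optional dependency
    from pyvistaqt import QtInteractor
    PYVISTA_AVAILABLE = True
except Exception:                         # ImportError, but also e.g. missing libGL
    PYVISTA_AVAILABLE = False

def make_plotter(parent=None):
    """Return an embedded 3D plotter, or None so the caller can show a fallback label."""
    if not PYVISTA_AVAILABLE:
        return None
    try:
        return QtInteractor(parent)       # can still fail in headless/tunnel sessions
    except Exception:
        return None
```
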
Install pyvista and pyvistaqt to enable 3D plotting.") + except Exception: + pass + + self.setWidget(container) + + # State + self.current_file_path = None + self.current_dataset_path = None + self.cloud_mesh = None + self.volume_grid = None + self.h5loader = HDF5Loader() + + def _clear_plot(self): + try: + if self.plotter is not None: + self.plotter.clear() + self.plotter.add_axes(xlabel='H', ylabel='K', zlabel='L') + except Exception: + pass + self.cloud_mesh = None + self.volume_grid = None + + def _plot_points(self, points: np.ndarray, intensities: np.ndarray): + if self.plotter is None: + return + self._clear_plot() + # PolyData points with intensity scalars + import pyvista as pyv + self.cloud_mesh = pyv.PolyData(points) + self.cloud_mesh['intensity'] = intensities + self.plotter.add_mesh( + self.cloud_mesh, + scalars='intensity', + cmap='jet', + point_size=5.0, + name='points', + show_scalar_bar=True, + reset_camera=True, + ) + try: + self.plotter.show_bounds( + mesh=self.cloud_mesh, + xtitle='H Axis', ytitle='K Axis', ztitle='L Axis', + bounds=self.cloud_mesh.bounds, + ) + try: + ca = getattr(self.plotter.renderer, 'cube_axes_actor', None) + if ca: + ca.GetXAxesLinesProperty().SetColor(1.0, 0.0, 0.0) + ca.GetYAxesLinesProperty().SetColor(0.0, 1.0, 0.0) + ca.GetZAxesLinesProperty().SetColor(0.0, 0.0, 1.0) + ca.GetTitleTextProperty(0).SetColor(1.0, 0.0, 0.0) + ca.GetTitleTextProperty(1).SetColor(0.0, 1.0, 0.0) + ca.GetTitleTextProperty(2).SetColor(0.0, 0.0, 1.0) + ca.GetLabelTextProperty(0).SetColor(1.0, 0.0, 0.0) + ca.GetLabelTextProperty(1).SetColor(0.0, 1.0, 0.0) + ca.GetLabelTextProperty(2).SetColor(0.0, 0.0, 1.0) + except Exception: + pass + except Exception: + pass + try: + self.plotter.reset_camera(); self.plotter.render() + except Exception: + pass + + def _plot_volume_array(self, volume: np.ndarray, metadata: dict = None): + if self.plotter is None: + return + self._clear_plot() + import pyvista as pyv + grid = pyv.ImageData() + dims_cells = np.array(volume.shape, dtype=int) + grid.dimensions = (dims_cells + 1).tolist() + spacing = (metadata or {}).get('voxel_spacing') or (1.0, 1.0, 1.0) + origin = (metadata or {}).get('grid_origin') or (0.0, 0.0, 0.0) + try: + grid.spacing = tuple(float(x) for x in spacing) + except Exception: + grid.spacing = (1.0, 1.0, 1.0) + try: + grid.origin = tuple(float(x) for x in origin) + except Exception: + grid.origin = (0.0, 0.0, 0.0) + try: + arr_order = (metadata or {}).get('array_order', 'F') or 'F' + grid.cell_data['intensity'] = volume.flatten(order=arr_order) + except Exception: + grid.cell_data['intensity'] = volume.flatten(order='F') + self.volume_grid = grid + self.plotter.add_volume( + volume=self.volume_grid, + scalars='intensity', + name='cloud_volume', + reset_camera=True, + show_scalar_bar=True, + ) + try: + self.plotter.show_bounds( + mesh=self.volume_grid, + xtitle='H Axis', ytitle='K Axis', ztitle='L Axis', + bounds=self.volume_grid.bounds, + ) + # Color cube axes H/K/L + try: + ca = getattr(self.plotter.renderer, 'cube_axes_actor', None) + if ca: + ca.GetXAxesLinesProperty().SetColor(1.0, 0.0, 0.0) + ca.GetYAxesLinesProperty().SetColor(0.0, 1.0, 0.0) + ca.GetZAxesLinesProperty().SetColor(0.0, 0.0, 1.0) + ca.GetTitleTextProperty(0).SetColor(1.0, 0.0, 0.0) + ca.GetTitleTextProperty(1).SetColor(0.0, 1.0, 0.0) + ca.GetTitleTextProperty(2).SetColor(0.0, 0.0, 1.0) + ca.GetLabelTextProperty(0).SetColor(1.0, 0.0, 0.0) + ca.GetLabelTextProperty(1).SetColor(0.0, 1.0, 0.0) + ca.GetLabelTextProperty(2).SetColor(0.0, 0.0, 1.0) + except 
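
Stripped of the Qt embedding and the cube-axes coloring, `_plot_points` follows PyVista's standard point-cloud recipe: wrap the (N, 3) array in `PolyData`, attach per-point scalars, and let the colormap do the rest. A standalone sketch with synthetic stand-in data (not from the loader):

```python
import numpy as np
import pyvista as pv

rng = np.random.default_rng(0)
points = rng.uniform(-1.0, 1.0, size=(1000, 3))   # stand-in for HKL coordinates
intensities = rng.random(1000)

cloud = pv.PolyData(points)
cloud['intensity'] = intensities                  # per-point scalars drive the colormap

plotter = pv.Plotter()
plotter.add_mesh(cloud, scalars='intensity', cmap='jet',
                 point_size=5.0, show_scalar_bar=True)
plotter.show_bounds(xtitle='H Axis', ytitle='K Axis', ztitle='L Axis')
plotter.show()
```
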
Exception: + pass + except Exception: + pass + try: + self.plotter.reset_camera(); self.plotter.render() + except Exception: + pass + + def load_data(self): + """Load dataset and plot based on type. Prefer Workbench selection; fallback to file dialog.""" + if not PYVISTA_AVAILABLE: + QMessageBox.warning(self, '3D Viewer', 'PyVista is not available.') + return + # Prefer current selection from Workbench + file_path = getattr(self.main, 'current_file_path', None) + dataset_path = getattr(self.main, 'selected_dataset_path', None) + use_dialog = not (file_path and dataset_path) + + if use_dialog: + from PyQt5.QtWidgets import QFileDialog + file_name, _ = QFileDialog.getOpenFileName( + self, 'Select HDF5 or VTI File', '', 'HDF5 or VTI Files (*.h5 *.hdf5 *.vti);;All Files (*)' + ) + if not file_name: + QMessageBox.information(self, 'File', 'No file selected.') + return + self.current_file_path = file_name + # Inspect via loader info + loader = self.h5loader + # VTI path + try: + from pathlib import Path as _Path + if _Path(file_name).suffix.lower() == '.vti': + volume, vol_shape = loader.load_vti_volume_3d(file_name) + if volume is None or int(volume.size) == 0: + QMessageBox.warning(self, 'Load Error', f'No volume data found in VTI file.\nError: {loader.last_error}') + return + meta = getattr(loader, 'file_metadata', {}) or {} + self._plot_volume_array(volume, meta) + return + except Exception: + pass + # HDF5 decide type + try: + info = loader.get_file_info(file_name, style='dict') + except Exception: + info = {} + dt = str(info.get('data_type', '')).lower() or str(info.get('metadata', {}).get('data_type', '')).lower() + if dt == 'volume': + volume, vol_shape = loader.load_h5_volume_3d(file_name) + if volume is None or int(volume.size) == 0: + QMessageBox.warning(self, 'Load Error', f'No volume data found in HDF5 file.\nError: {loader.last_error}') + return + meta = getattr(loader, 'file_metadata', {}) or {} + self._plot_volume_array(volume, meta) + else: + points, intensities, num_images, shape = loader.load_h5_to_3d(file_name) + if int(points.size) == 0 or int(intensities.size) == 0: + QMessageBox.warning(self, 'Load Error', f'No valid 3D point data found.\nError: {loader.last_error}') + return + self._plot_points(points, intensities) + return + + # Use Workbench selection + self.current_file_path = file_path + self.current_dataset_path = dataset_path + try: + import h5py + with h5py.File(file_path, 'r') as h5file: + if dataset_path not in h5file: + QMessageBox.warning(self, 'Load Error', 'Selected dataset not found in file.') + return + item = h5file[dataset_path] + if not hasattr(item, 'shape'): + QMessageBox.warning(self, 'Load Error', 'Selected item is not a dataset.') + return + data = np.asarray(item[...]) + except Exception as e: + QMessageBox.critical(self, 'Error Loading Data', f'Failed to load dataset:\n{e}') + return + # Decide plotting + if data.ndim == 3: + self._plot_volume_array(data, metadata={'array_order': 'F'}) + elif data.ndim >= 2: + # Flatten to points: H,K index grid with intensities from 2D + h, k = data.shape[-2], data.shape[-1] + X, Y = np.meshgrid(np.arange(k), np.arange(h)) + Z = np.zeros_like(X, dtype=float) + points = np.column_stack([X.ravel().astype(float), Y.ravel().astype(float), Z.ravel()]) + intens = np.asarray(data[-1] if data.ndim == 3 else data, dtype=float).ravel() + self._plot_points(points, intens) + else: + QMessageBox.information(self, 'Load', 'Dataset is not 2D/3D numeric; cannot plot.') diff --git a/viewer/workbench/managers/roi_manager.py 
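
`_plot_volume_array` leans on two VTK conventions worth calling out: `ImageData.dimensions` counts grid *points*, so it is the voxel shape plus one per axis, and scalars stored as `cell_data` must be flattened in Fortran order (x varies fastest). A standalone sketch with synthetic data, mirroring the call pattern above:

```python
import numpy as np
import pyvista as pv

volume = np.random.default_rng(0).random((32, 32, 32)).astype(np.float32)

grid = pv.ImageData()
grid.dimensions = tuple(int(n) + 1 for n in volume.shape)  # points = cells + 1
grid.spacing = (1.0, 1.0, 1.0)   # would come from metadata['voxel_spacing']
grid.origin = (0.0, 0.0, 0.0)    # would come from metadata['grid_origin']
grid.cell_data['intensity'] = volume.flatten(order='F')    # x-fastest VTK layout

plotter = pv.Plotter()
plotter.add_volume(grid, scalars='intensity', show_scalar_bar=True)
plotter.show()
```
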
b/viewer/workbench/managers/roi_manager.py
new file mode 100644
index 0000000..068e6f4
--- /dev/null
+++ b/viewer/workbench/managers/roi_manager.py
@@ -0,0 +1,1001 @@
+"""
+ROI Manager for Workbench
+Centralizes ROI lifecycle, docks, stats computation, and interactions to shorten WorkbenchWindow.
+"""
+
+from typing import List, Optional
+from PyQt5.QtCore import Qt, QSize
+from PyQt5.QtGui import QCursor
+from PyQt5.QtWidgets import (
+    QDockWidget,
+    QListWidget,
+    QListWidgetItem,
+    QTableWidget,
+    QTableWidgetItem,
+    QMenu,
+    QAction,
+    QInputDialog,
+    QWidget,
+    QVBoxLayout,
+    QHBoxLayout,
+    QCheckBox,
+    QLabel,
+    QToolButton,
+    QStyle,
+    QFileDialog,
+)
+import numpy as np
+import pyqtgraph as pg
+import qtawesome as qta
+import h5py
+import os
+
+
+class ContextRectROI(pg.RectROI):
+    """Rectangular ROI with a right-click context menu that delegates actions to main-window handlers."""
+    def __init__(self, parent_window, pos, size, pen=None):
+        super().__init__(pos, size, pen=pen)
+        self.parent_window = parent_window
+        try:
+            self.setAcceptedMouseButtons(Qt.LeftButton | Qt.RightButton)
+        except Exception:
+            pass
+        # Make ROI rotatable: add a rotate handle at the top-right, rotating about center
+        try:
+            self.addRotateHandle([1, 0], [0.5, 0.5])
+        except Exception:
+            pass
+
+    def mouseClickEvent(self, ev):
+        try:
+            if ev.button() == Qt.RightButton:
+                menu = QMenu()
+                action_stats = QAction("Show ROI Stats", menu)
+                action_rename = QAction("Rename ROI", menu)
+                action_set_active = QAction("Set Active ROI", menu)
+                action_plot = QAction("Open ROI Plot", menu)
+                action_delete = QAction("Delete ROI", menu)
+
+                action_stats.triggered.connect(lambda: self.parent_window.roi_manager.show_roi_stats_for_roi(self))
+                action_rename.triggered.connect(lambda: self.parent_window.roi_manager.rename_roi(self))
+                action_set_active.triggered.connect(lambda: self.parent_window.roi_manager.set_active_roi(self))
+                action_plot.triggered.connect(lambda: self.parent_window.open_roi_plot_dock(self))
+                action_delete.triggered.connect(lambda: self.parent_window.roi_manager.delete_roi(self))
+
+                # Add actions and separator before Save
+                menu.addAction(action_stats)
+                menu.addAction(action_rename)
+                menu.addAction(action_set_active)
+                menu.addAction(action_plot)
+                menu.addAction(action_delete)
+                menu.addSeparator()
+                # Save ROI action
+                action_save = QAction("Save ROI", menu)
+                try:
+                    action_save.triggered.connect(lambda: self.parent_window.roi_manager.save_roi(self))
+                except Exception:
+                    pass
+                menu.addAction(action_save)
+                menu.exec_(QCursor.pos())
+                ev.accept()
+                return
+        except Exception:
+            pass
+        # Default behavior for non-right-click events
+        try:
+            super().mouseClickEvent(ev)
+        except Exception:
+            pass
+
+
+class ROIManager:
+    def __init__(self, main_window):
+        self.main = main_window
+        # ROI collections/state
+        self.rois: List[pg.ROI] = []
+        self.current_roi: Optional[pg.ROI] = None
+        self.roi_by_item = {}
+        self.item_by_roi_id = {}
+        self.roi_names = {}
+        self.stats_row_by_roi_id = {}
+        # Mapping helpers for stats table and overlay labels
+        self.roi_by_stats_row = {}
+        self.roi_label_by_id = {}
+        self.show_names_checkbox = None
+        self.hidden_roi_ids = set()
+
+    # ----- Setup -----
+    def setup_docks(self) -> None:
+        """Create/attach ROI list dock and ROI stats dock to the main window."""
+        try:
+            # ROI list dock removed per request
+            pass
+        except Exception as e:
+            self.main.update_status(f"Error setting up ROI dock: {e}", level='error')
+
+        try:
+            # ROI stats dock (renamed to
'ROI') with selection and actions + self.main.roi_stats_dock = QDockWidget("ROI", self.main) + self.main.roi_stats_dock.setAllowedAreas(Qt.RightDockWidgetArea) + + # Container widget to hold controls + table + container = QWidget(self.main.roi_stats_dock) + vlayout = QVBoxLayout(container) + try: + vlayout.setContentsMargins(6, 6, 6, 6) + vlayout.setSpacing(6) + except Exception: + pass + + # Top controls: actions for selected + controls_layout = QHBoxLayout() + lbl_actions = QLabel("Actions for selected:") + self.show_names_checkbox = QCheckBox("Show names above ROIs") + try: + self.show_names_checkbox.toggled.connect(lambda _: self.update_all_roi_labels()) + except Exception: + pass + controls_layout.addWidget(lbl_actions) + controls_layout.addWidget(self.show_names_checkbox) + controls_layout.addStretch(1) + vlayout.addLayout(controls_layout) + + # ROI stats table with a selection checkbox column + self.main.roi_stats_table = QTableWidget(0, 13, container) + self.main.roi_stats_table.setHorizontalHeaderLabels([ + "","Actions","Name","sum","min","max","mean","std","count","x","y","w","h" + ]) + vlayout.addWidget(self.main.roi_stats_table) + + # Set container as dock widget + self.main.roi_stats_dock.setWidget(container) + self.main.addDockWidget(Qt.RightDockWidgetArea, self.main.roi_stats_dock) + # Register toggle under Windows->2d submenu + self.main.add_dock_toggle_action(self.main.roi_stats_dock, "ROI", segment_name="2d") + try: + self.main.roi_stats_dock.visibilityChanged.connect(self.on_roi_stats_dock_visibility_changed) + except Exception: + pass + try: + self.main.roi_stats_table.itemChanged.connect(self.on_roi_stats_item_changed) + except Exception: + pass + except Exception as e: + self.main.update_status(f"Error setting up ROI stats dock: {e}", level='error') + + # ----- ROI lifecycle ----- + def create_and_add_roi(self) -> None: + """Create a new ROI and add it to the image view and docks.""" + try: + if not hasattr(self.main, 'image_view') or not hasattr(self.main, 'current_2d_data') or self.main.current_2d_data is None: + self.main.update_status("Please load image data first", level='warning') + return + + # cycle through a set of distinct colors + roi_colors = [ + (255, 0, 0, 255), + (0, 255, 0, 255), + (0, 0, 255, 255), + (255, 255, 0, 255), + (255, 0, 255, 255), + (0, 255, 255, 255), + ] + pen = roi_colors[len(self.rois) % len(roi_colors)] + roi = ContextRectROI(self.main, [50, 50], [100, 100], pen=pen) + self.main.image_view.addItem(roi) + self.rois.append(roi) + # keep main.rois in sync for compatibility + try: + if hasattr(self.main, 'rois'): + self.main.rois.append(roi) + except Exception: + pass + # track in dock + try: + self.add_roi_to_dock(roi) + except Exception: + pass + # current_roi for compatibility + self.current_roi = roi + try: + roi.sigRegionChanged.connect(lambda r=roi: (self.show_roi_stats_for_roi(r), self.update_roi_item(r), self.refresh_label_for_roi(r))) + # Also update stats when drag/resize finishes (some pyqtgraph versions emit this) + if hasattr(roi, 'sigRegionChangeFinished'): + roi.sigRegionChangeFinished.connect(lambda r=roi: (self.show_roi_stats_for_roi(r), self.update_roi_item(r), self.refresh_label_for_roi(r))) + except Exception: + pass + + # Populate stats immediately for the new ROI + try: + self.show_roi_stats_for_roi(roi) + except Exception: + pass + + self.main.update_status("ROI added - drag to position and resize as needed") + except Exception as e: + self.main.update_status(f"Error drawing ROI: {e}", level='error') + + def 
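
`create_and_add_roi` follows the standard pyqtgraph recipe: add a `RectROI` to the `ImageView`, then recompute stats on both `sigRegionChanged` (continuous, while dragging) and `sigRegionChangeFinished` (once, on release). Reduced to a runnable sketch with synthetic data:

```python
import numpy as np
import pyqtgraph as pg
from PyQt5.QtWidgets import QApplication

app = QApplication([])
view = pg.ImageView()
view.setImage(np.random.default_rng(0).random((512, 512)))

roi = pg.RectROI([50, 50], [100, 100], pen=(255, 0, 0, 255))
view.addItem(roi)

def recompute(r):
    # In the manager this refreshes stats, list text, and the overlay label.
    print("ROI at", r.pos(), "size", r.size())

roi.sigRegionChanged.connect(recompute)          # fires continuously while dragging
roi.sigRegionChangeFinished.connect(recompute)   # fires once when the drag ends

view.show()
app.exec_()
```
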
set_active_roi(self, roi) -> None: + try: + self.current_roi = roi + self.main.current_roi = roi # keep main in sync + self.main.update_status("Active ROI set") + except Exception as e: + self.main.update_status(f"Error setting active ROI: {e}", level='error') + + def rename_roi(self, roi) -> None: + """Prompt user to rename an ROI and update docks/stats accordingly.""" + try: + current_name = self.get_roi_name(roi) + text, ok = QInputDialog.getText(self.main, "Rename ROI", "Enter ROI name:", text=current_name) + if ok and str(text).strip(): + new_name = str(text).strip() + self.roi_names[id(roi)] = new_name + # update dock list item text + self.update_roi_item(roi) + # update stats table name cell if exists (column 1) + row = self.stats_row_by_roi_id.get(id(roi)) + if row is not None and hasattr(self.main, 'roi_stats_table'): + try: + self.main.roi_stats_table.setItem(row, 1, QTableWidgetItem(new_name)) + except Exception: + pass + # update overlay label text if visible + try: + self.refresh_label_for_roi(roi) + except Exception: + pass + # update dockable ROI plot title if open + try: + if hasattr(self.main, 'update_roi_plot_dock_title'): + self.main.update_roi_plot_dock_title(roi) + except Exception: + pass + self.main.update_status(f"Renamed ROI to '{new_name}'") + except Exception as e: + self.main.update_status(f"Error renaming ROI: {e}", level='error') + + def delete_roi(self, roi) -> None: + try: + # remove overlay label if present + try: + self.remove_label_for_roi(roi) + except Exception: + pass + if hasattr(self.main, 'image_view'): + try: + self.main.image_view.removeItem(roi) + except Exception: + pass + if roi in self.rois: + self.rois.remove(roi) + # keep main.rois in sync + try: + if hasattr(self.main, 'rois') and roi in self.main.rois: + self.main.rois.remove(roi) + except Exception: + pass + if getattr(self.main, 'current_roi', None) is roi: + self.main.current_roi = None + if self.current_roi is roi: + self.current_roi = None + # Update dock list + try: + item = self.item_by_roi_id.pop(id(roi), None) + if item is not None and hasattr(self.main, 'roi_list'): + row = self.main.roi_list.row(item) + self.main.roi_list.takeItem(row) + if item in self.roi_by_item: + self.roi_by_item.pop(item, None) + except Exception: + pass + # Rebuild ROI stats dock for remaining ROIs + try: + if hasattr(self.main, 'roi_stats_table') and self.main.roi_stats_table is not None: + self.main.roi_stats_table.setRowCount(0) + self.stats_row_by_roi_id = {} + self.roi_by_stats_row = {} + frame = self.get_current_frame_data() + for r in self.rois: + s = self.compute_roi_stats(frame, r) + if s: + self.update_stats_table_for_roi(r, s) + except Exception: + pass + self.main.update_status("ROI deleted") + except Exception as e: + self.main.update_status(f"Error deleting ROI: {e}", level='error') + + def save_roi(self, roi) -> None: + """Save the selected ROI to the current HDF5 file under /entry/data/rois with same frame structure.""" + try: + # Ensure we have a current HDF5 file path + file_path = getattr(self.main, "current_file_path", None) + if not file_path or not isinstance(file_path, str): + self.main.update_status("No current HDF5 file loaded", level='warning') + return + + # Access current data and image item (for transform-aware extraction) + data = getattr(self.main, "current_2d_data", None) + if data is None: + frame = self.get_current_frame_data() + if frame is None: + self.main.update_status("No image data to save ROI from", level='warning') + return + data = frame + image_item = 
getattr(self.main.image_view, 'imageItem', None) if hasattr(self.main, 'image_view') else None + + # Helper: extract ROI subarray from a frame + def extract_sub(frame): + sub = None + try: + if image_item is not None: + sub = roi.getArrayRegion(frame, image_item) + if sub is not None and hasattr(sub, 'ndim') and sub.ndim > 2: + sub = np.squeeze(sub) + except Exception: + sub = None + if sub is None or int(getattr(sub, 'size', 0)) == 0: + pos = roi.pos(); size = roi.size() + x0 = max(0, int(pos.x())); y0 = max(0, int(pos.y())) + w = max(1, int(size.x())); h = max(1, int(size.y())) + hgt, wid = frame.shape + x1 = min(wid, x0 + w); y1 = min(hgt, y0 + h) + if x0 < x1 and y0 < y1: + sub = frame[y0:y1, x0:x1] + return sub + + # Build ROI stack across frames (or single frame for 2D data) + # Build ROI-only stack: shape is (num_frames, h, w) for 3D data, or (h, w) for 2D + if isinstance(data, np.ndarray) and data.ndim == 3: + num_frames = int(data.shape[0]) + samples = [] + for i in range(num_frames): + frame = np.asarray(data[i], dtype=np.float32) + sub = extract_sub(frame) + if sub is None or int(getattr(sub, 'size', 0)) == 0: + # Fallback to zero array using current ROI box size + size = roi.size(); w = max(1, int(size.x())); h = max(1, int(size.y())) + samples.append(np.zeros((h, w), dtype=np.float32)) + else: + samples.append(np.asarray(sub, dtype=np.float32)) + # Ensure consistent shape across frames by trimming to smallest h,w + min_h = min(s.shape[0] for s in samples) + min_w = min(s.shape[1] for s in samples) + roi_stack = np.stack([s[:min_h, :min_w] for s in samples], axis=0) + else: + frame = np.asarray(data, dtype=np.float32) + sub = extract_sub(frame) + if sub is None or int(getattr(sub, 'size', 0)) == 0: + self.main.update_status("ROI appears empty; nothing to save", level='warning') + return + roi_stack = np.asarray(sub, dtype=np.float32) + + # Write to HDF5 under /entry/data/rois + try: + with h5py.File(file_path, 'a') as h5f: + entry = h5f.require_group('entry') + data_grp = entry.get('data') + if data_grp is None or not isinstance(data_grp, h5py.Group): + data_grp = entry.require_group('data') + rois_grp = data_grp.require_group('rois') + + # Dataset name based on ROI name + name = self.get_roi_name(roi) + ds_name = str(name).replace(' ', '_') + # Replace existing dataset if present + if ds_name in rois_grp: + try: + del rois_grp[ds_name] + except Exception: + pass + dset = rois_grp.create_dataset(ds_name, data=roi_stack, dtype=np.float32) + # Attach ROI metadata as dataset attributes: position/size and source dataset path + try: + pos = roi.pos(); size = roi.size() + x = max(0, int(pos.x())); y = max(0, int(pos.y())) + w = max(1, int(size.x())); h = max(1, int(size.y())) + dset.attrs['x'] = int(x) + dset.attrs['y'] = int(y) + dset.attrs['w'] = int(w) + dset.attrs['h'] = int(h) + src_path = getattr(self.main, 'selected_dataset_path', None) or '/entry/data/data' + dset.attrs['source_path'] = str(src_path) + except Exception: + pass + + # Info group: original file name and frames used (blank for now) + info_grp = rois_grp.require_group('info') + try: + dt = h5py.string_dtype(encoding='utf-8') + if 'original_file_name' in info_grp: + del info_grp['original_file_name'] + info_grp.create_dataset('original_file_name', data=np.array(os.path.basename(file_path), dtype=dt)) + except Exception: + pass + try: + if 'frames' in info_grp: + del info_grp['frames'] + info_grp.create_dataset('frames', data=np.array([], dtype=np.int32)) + except Exception: + pass + + self.main.update_status(f"ROI 
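
Reduced to its core write path, `save_roi` is a create-or-replace into `/entry/data/rois` with the ROI box recorded as dataset attributes. A sketch of that path as a free function (the file name and shapes here are illustrative):

```python
import numpy as np
import h5py

def save_roi_stack(file_path, ds_name, roi_stack, x, y, w, h,
                   source_path='/entry/data/data'):
    """Write (or overwrite) an ROI stack under /entry/data/rois with box metadata."""
    with h5py.File(file_path, 'a') as h5f:
        rois_grp = h5f.require_group('entry/data/rois')
        if ds_name in rois_grp:            # replace any previous save of this ROI
            del rois_grp[ds_name]
        dset = rois_grp.create_dataset(ds_name, data=roi_stack.astype(np.float32))
        dset.attrs['x'], dset.attrs['y'] = int(x), int(y)
        dset.attrs['w'], dset.attrs['h'] = int(w), int(h)
        dset.attrs['source_path'] = str(source_path)

# Example: a 10-frame, 48x64 ROI cut from frames of /entry/data/data.
save_roi_stack('example.h5', 'ROI_1', np.zeros((10, 48, 64), np.float32),
               x=50, y=50, w=64, h=48)
```
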
saved to HDF5 at /entry/data/rois/{ds_name}") + except Exception as e: + self.main.update_status(f"Error writing ROI to HDF5: {e}", level='error') + except Exception as e: + self.main.update_status(f"Error in save_roi: {e}", level='error') + + def clear_all_rois(self) -> None: + try: + for r in list(self.rois): + self.delete_roi(r) + except Exception: + pass + + def render_rois_for_dataset(self, file_path: str, dataset_path: str) -> None: + """Render ROI boxes associated with the given dataset by reading HDF5 ROI metadata.""" + try: + if not file_path or not os.path.exists(file_path): + return + with h5py.File(file_path, 'r') as h5f: + rois_grp = h5f.get('/entry/data/rois') + if rois_grp is None: + return + # Iterate ROI datasets + for name in rois_grp.keys(): + item = rois_grp.get(name) + if not isinstance(item, h5py.Dataset): + continue + src = item.attrs.get('source_path', None) + if src is None: + continue + if str(src) != str(dataset_path): + continue + # Read xywh + x = int(item.attrs.get('x', 0)) + y = int(item.attrs.get('y', 0)) + w = int(item.attrs.get('w', max(1, item.shape[-1] if len(item.shape) >= 2 else 1))) + h = int(item.attrs.get('h', max(1, item.shape[-2] if len(item.shape) >= 2 else 1))) + # Create ROI + pen = (255, 0, 0, 255) + roi = ContextRectROI(self.main, [x, y], [w, h], pen=pen) + try: + self.main.image_view.addItem(roi) + except Exception: + continue + # Track in manager structures + self.rois.append(roi) + try: + if hasattr(self.main, 'rois'): + self.main.rois.append(roi) + except Exception: + pass + # Name mapping: use dataset name + try: + self.roi_names[id(roi)] = str(name) + except Exception: + pass + # Wire signals and populate stats + try: + roi.sigRegionChanged.connect(lambda r=roi: (self.show_roi_stats_for_roi(r), self.update_roi_item(r), self.refresh_label_for_roi(r))) + if hasattr(roi, 'sigRegionChangeFinished'): + roi.sigRegionChangeFinished.connect(lambda r=roi: (self.show_roi_stats_for_roi(r), self.update_roi_item(r), self.refresh_label_for_roi(r))) + self.show_roi_stats_for_roi(roi) + except Exception: + pass + except Exception: + pass + + # ----- ROI stats ----- + def get_current_frame_data(self): + """Return the image currently displayed in the ImageView. + Falls back to the underlying current_2d_data if the ImageView has no image yet. + This ensures ROI stats reflect what the user sees (including frame/log/levels changes). 
+ """ + try: + # Prefer the image currently displayed in the ImageView + img = None + try: + if hasattr(self.main, 'image_view'): + # Try ImageView.getImage() first (includes display transforms) + if hasattr(self.main.image_view, 'getImage'): + try: + img = self.main.image_view.getImage() + if img is not None: + arr = np.asarray(img, dtype=np.float32) + if isinstance(arr, tuple) and len(arr) > 0: + arr = np.asarray(arr[0], dtype=np.float32) + if arr.ndim == 3: + arr = np.asarray(arr[0], dtype=np.float32) + if arr.ndim == 2 and arr.size > 0: + + return arr + except Exception: + pass + # Fallback to imageItem.image + if hasattr(self.main.image_view, 'imageItem') and self.main.image_view.imageItem is not None: + img = getattr(self.main.image_view.imageItem, 'image', None) + except Exception: + img = None + + if img is not None: + arr = np.asarray(img, dtype=np.float32) + # Some versions store a tuple (data, ...); ensure we pick array + if isinstance(arr, tuple) and len(arr) > 0: + arr = np.asarray(arr[0], dtype=np.float32) + # Ensure 2D slice + if arr.ndim == 3: + # Use first frame if a 3D stack somehow made it to imageItem + arr = np.asarray(arr[0], dtype=np.float32) + if arr.ndim == 2 and arr.size > 0: + + return arr + + # Fallback: use the underlying data model + if not hasattr(self.main, 'current_2d_data') or self.main.current_2d_data is None: + return None + if self.main.current_2d_data.ndim == 3: + frame_index = 0 + if hasattr(self.main, 'frame_spinbox') and self.main.frame_spinbox.isEnabled(): + frame_index = self.main.frame_spinbox.value() + if frame_index < 0 or frame_index >= self.main.current_2d_data.shape[0]: + frame_index = 0 + arr = np.asarray(self.main.current_2d_data[frame_index], dtype=np.float32) + + return arr + else: + arr = np.asarray(self.main.current_2d_data, dtype=np.float32) + + return arr + except Exception: + return None + + def compute_roi_stats(self, frame_data, roi): + """Compute stats for ROI using pyqtgraph's array-extraction helpers to honor image/item transforms. + Falls back to bounding-box slicing if needed. Returns None if ROI is empty/out-of-bounds. + """ + try: + if frame_data is None or roi is None: + return None + + # Try to extract ROI region via pyqtgraph (handles scale/transform/orientation) + image_item = getattr(self.main.image_view, 'imageItem', None) if hasattr(self.main, 'image_view') else None + sub = None + try: + if image_item is not None: + sub = roi.getArrayRegion(frame_data, image_item) + if sub is not None and hasattr(sub, 'ndim'): + # Ensure 2D (some returns may add an extra dim) + if sub.ndim > 2: + sub = np.squeeze(sub) + except Exception: + sub = None + + # Compute xywh using getArraySlice if possible (pixel-space), else fallback to ROI pos/size + x0 = y0 = w = h = None + try: + if image_item is not None: + slc_info = roi.getArraySlice(frame_data, image_item) + # slc_info returns (slices, transform). 
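
The key call in `compute_roi_stats` is `roi.getArrayRegion(frame, imageItem)`, which honors the image item's transforms (scale, rotation, axis order); the naive `frame[y0:y1, x0:x1]` slice is only the fallback. The happy path in isolation:

```python
import numpy as np
import pyqtgraph as pg
from PyQt5.QtWidgets import QApplication

app = QApplication([])
frame = np.random.default_rng(0).random((512, 512)).astype(np.float32)
view = pg.ImageView()
view.setImage(frame)

roi = pg.RectROI([50, 50], [100, 100])
view.addItem(roi)

# Transform-aware extraction: the returned sub-array respects any scale,
# rotation, or axis ordering applied to the displayed image item.
sub = roi.getArrayRegion(frame, view.imageItem)
stats = {
    'sum': float(np.sum(sub)), 'min': float(np.min(sub)), 'max': float(np.max(sub)),
    'mean': float(np.mean(sub)), 'std': float(np.std(sub)), 'count': int(sub.size),
}
print(stats)
```
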
slices is typically a tuple of (rows, cols) + slices = slc_info[0] if (isinstance(slc_info, (tuple, list)) and len(slc_info) > 0) else None + if isinstance(slices, (tuple, list)) and len(slices) >= 2: + rs, cs = slices[0], slices[1] + # Handle slice or numpy index arrays + def _bounds_from_index(idx, maxdim): + try: + if isinstance(idx, slice): + start = int(0 if idx.start is None else idx.start) + stop = int(maxdim if idx.stop is None else idx.stop) + return start, stop + idx_arr = np.asarray(idx) + if idx_arr.size > 0: + return int(np.min(idx_arr)), int(np.max(idx_arr) + 1) + except Exception: + pass + return 0, maxdim + y0_, y1_ = _bounds_from_index(rs, frame_data.shape[0]) + x0_, x1_ = _bounds_from_index(cs, frame_data.shape[1]) + x0, y0 = max(0, x0_), max(0, y0_) + w = max(0, x1_ - x0_) + h = max(0, y1_ - y0_) + except Exception: + pass + + # If array-region failed or slices produced invalid region, fallback to bounding box from ROI pos/size + try: + if sub is None or (hasattr(sub, 'size') and int(sub.size) == 0) or any(v is None for v in (x0, y0, w, h)): + height, width = frame_data.shape + pos = roi.pos(); size = roi.size() + x0 = max(0, int(pos.x())); y0 = max(0, int(pos.y())) + w = max(1, int(size.x())); h = max(1, int(size.y())) + x1 = min(width, x0 + w); y1 = min(height, y0 + h) + if x0 >= x1 or y0 >= y1: + return None + sub = frame_data[y0:y1, x0:x1] + w = x1 - x0; h = y1 - y0 + except Exception: + return None + + # Final safety: ensure we have a valid sub-region + if sub is None or int(sub.size) == 0: + return None + + stats = { + 'x': int(x0) if x0 is not None else 0, + 'y': int(y0) if y0 is not None else 0, + 'w': int(w) if w is not None else int(sub.shape[1]) if sub.ndim == 2 else 0, + 'h': int(h) if h is not None else int(sub.shape[0]) if sub.ndim == 2 else 0, + 'sum': float(np.sum(sub)), + 'min': float(np.min(sub)), + 'max': float(np.max(sub)), + 'mean': float(np.mean(sub)), + 'std': float(np.std(sub)), + 'count': int(sub.size), + } + + return stats + except Exception: + return None + + def show_roi_stats_for_roi(self, roi) -> None: + try: + frame = self.get_current_frame_data() + stats = self.compute_roi_stats(frame, roi) + if stats is None: + self.main.update_status("ROI stats unavailable", level='warning') + return + text = (f"ROI [{stats['x']},{stats['y']} {stats['w']}x{stats['h']}] | " + f"sum={stats['sum']:.3f} min={stats['min']:.3f} max={stats['max']:.3f} " + f"mean={stats['mean']:.3f} std={stats['std']:.3f} count={stats['count']}") + if hasattr(self.main, 'roi_stats_label') and self.main.roi_stats_label is not None: + try: + self.main.roi_stats_label.setText(f"ROI Stats: {text}") + except Exception: + pass + try: + self.update_stats_table_for_roi(roi, stats) + except Exception: + pass + self.main.update_status("ROI stats computed") + except Exception as e: + self.main.update_status(f"Error showing ROI stats: {e}", level='error') + + # ----- Batch stats refresh ----- + def update_all_roi_stats(self): + try: + frame = self.get_current_frame_data() + if frame is None: + return + for r in list(self.rois): + s = self.compute_roi_stats(frame, r) + if s: + self.update_stats_table_for_roi(r, s) + except Exception: + pass + + # ----- Dock/list helpers ----- + def format_roi_text(self, roi): + try: + pos = roi.pos(); size = roi.size() + x = int(pos.x()); y = int(pos.y()) + w = int(size.x()); h = int(size.y()) + name = self.get_roi_name(roi) + return f"{name}: x={x}, y={y}, w={w}, h={h}" + except Exception: + return "ROI" + + def get_roi_name(self, roi): + try: + name = 
self.roi_names.get(id(roi)) + if name: + return name + idx = 1 + if roi in self.rois: + idx = self.rois.index(roi) + 1 + name = f"ROI {idx}" + self.roi_names[id(roi)] = name + return name + except Exception: + return "ROI" + + def add_roi_to_dock(self, roi): + try: + if not hasattr(self.main, 'roi_list') or self.main.roi_list is None: + return + text = self.format_roi_text(roi) + item = QListWidgetItem(text) + self.main.roi_list.addItem(item) + self.roi_by_item[item] = roi + self.item_by_roi_id[id(roi)] = item + except Exception as e: + self.main.update_status(f"Error adding ROI to dock: {e}", level='error') + + def update_roi_item(self, roi): + try: + item = self.item_by_roi_id.get(id(roi)) + if item is not None: + item.setText(self.format_roi_text(roi)) + except Exception: + pass + + def on_roi_list_item_clicked(self, item): + try: + roi = self.roi_by_item.get(item) + if roi: + self.set_active_roi(roi) + except Exception as e: + self.main.update_status(f"Error selecting ROI from dock: {e}", level='error') + + def on_roi_list_item_double_clicked(self, item): + try: + roi = self.roi_by_item.get(item) + if roi: + self.show_roi_stats_for_roi(roi) + except Exception as e: + self.main.update_status(f"Error showing ROI stats from dock: {e}", level='error') + + def on_roi_stats_item_changed(self, item): + """Respond to selection checkbox toggles and name edits in the ROI stats table.""" + try: + if item is None: + return + row = item.row() + col = item.column() + roi = self.roi_by_stats_row.get(row) + if not roi: + return + if col == 0: + # Selection checkbox toggled + self.update_label_visibility_for_roi(roi) + elif col == 2: + # Name edited; update internal mapping and overlay label + try: + new_name = item.text() if hasattr(item, 'text') else None + if new_name: + self.roi_names[id(roi)] = str(new_name) + # update overlay label text if visible + self.refresh_label_for_roi(roi) + # update dockable ROI plot title if open + try: + if hasattr(self.main, 'update_roi_plot_dock_title'): + self.main.update_roi_plot_dock_title(roi) + except Exception: + pass + # Update any dock list item if present + try: + self.update_roi_item(roi) + except Exception: + pass + except Exception: + pass + except Exception: + pass + + def on_rois_dock_visibility_changed(self, visible): + try: + if hasattr(self.main, 'action_show_rois_dock'): + self.main.action_show_rois_dock.setChecked(bool(visible)) + except Exception: + pass + + def on_roi_stats_dock_visibility_changed(self, visible): + try: + if hasattr(self.main, 'action_show_roi_stats_dock'): + self.main.action_show_roi_stats_dock.setChecked(bool(visible)) + except Exception: + pass + + # ----- Overlay label helpers ----- + def update_all_roi_labels(self): + try: + for roi in list(self.rois): + self.update_label_visibility_for_roi(roi) + except Exception: + pass + + def update_label_visibility_for_roi(self, roi): + """Show/hide ROI name label above ROI depending on selection and checkbox.""" + try: + # Determine selection state from stats table + row = self.stats_row_by_roi_id.get(id(roi)) + selected = False + if row is not None and hasattr(self.main, 'roi_stats_table'): + try: + sel_item = self.main.roi_stats_table.item(row, 0) + selected = bool(sel_item) and sel_item.checkState() == Qt.Checked + except Exception: + selected = False + show_names = bool(self.show_names_checkbox and self.show_names_checkbox.isChecked()) + if selected and show_names: + # ensure label exists and update position/text + self.create_label_for_roi(roi) + self.refresh_label_for_roi(roi) + 
else: + # hide/remove label for this ROI + self.remove_label_for_roi(roi) + except Exception: + pass + + def create_label_for_roi(self, roi): + try: + if id(roi) in self.roi_label_by_id: + return + if not hasattr(self.main, 'image_view'): + return + name = self.get_roi_name(roi) + label = pg.TextItem(text=name, color='w') + try: + label.setAnchor((0, 1)) # bottom-left anchor + except Exception: + pass + self.main.image_view.addItem(label) + self.roi_label_by_id[id(roi)] = label + except Exception: + pass + + def refresh_label_for_roi(self, roi): + try: + label = self.roi_label_by_id.get(id(roi)) + if not label: + return + name = self.get_roi_name(roi) + try: + label.setText(name) + except Exception: + pass + pos = roi.pos() + x = float(getattr(pos, 'x', lambda: 0)()) + y = float(getattr(pos, 'y', lambda: 0)()) + # place just above the ROI box + y = max(0.0, y - 5.0) + try: + label.setPos(x, y) + except Exception: + pass + except Exception: + pass + + def remove_label_for_roi(self, roi): + try: + label = self.roi_label_by_id.pop(id(roi), None) + if label and hasattr(self.main, 'image_view'): + try: + self.main.image_view.removeItem(label) + except Exception: + pass + except Exception: + pass + + def set_roi_visibility(self, roi, visible: bool): + """Hide/show the ROI graphics and related overlay label using QtAwesome controls.""" + try: + if not hasattr(self.main, 'image_view'): + return + try: + roi.setVisible(bool(visible)) + except Exception: + pass + if visible: + try: + self.hidden_roi_ids.discard(id(roi)) + except Exception: + pass + self.update_label_visibility_for_roi(roi) + else: + try: + self.hidden_roi_ids.add(id(roi)) + except Exception: + pass + self.remove_label_for_roi(roi) + except Exception: + pass + + # ----- Stats table helpers ----- + def ensure_stats_row_for_roi(self, roi): + try: + if id(roi) in self.stats_row_by_roi_id: + return self.stats_row_by_roi_id[id(roi)] + if not hasattr(self.main, 'roi_stats_table') or self.main.roi_stats_table is None: + return None + row = self.main.roi_stats_table.rowCount() + self.main.roi_stats_table.insertRow(row) + self.stats_row_by_roi_id[id(roi)] = row + self.roi_by_stats_row[row] = roi + # selection checkbox in column 0 + try: + select_item = QTableWidgetItem() + select_item.setFlags(select_item.flags() | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled) + select_item.setCheckState(Qt.Unchecked) + self.main.roi_stats_table.setItem(row, 0, select_item) + except Exception: + pass + # actions widget in column 1: hide/show and delete using QtAwesome icons + try: + actions_widget = QWidget() + h = QHBoxLayout(actions_widget) + h.setContentsMargins(0, 0, 0, 0) + h.setSpacing(2) + icon_visible = qta.icon('fa.eye', color='black') + icon_hidden = qta.icon('fa.eye-slash', color='black') + icon_trash = qta.icon('fa.trash', color='black') + # Match checkbox indicator size + try: + style = getattr(self.main, 'style', lambda: None)() + indicator_w = style.pixelMetric(QStyle.PM_IndicatorWidth) if style else 16 + indicator_h = style.pixelMetric(QStyle.PM_IndicatorHeight) if style else 16 + except Exception: + indicator_w, indicator_h = 16, 16 + icon_size = QSize(indicator_w, indicator_h) + btn_eye = QToolButton(actions_widget) + btn_eye.setAutoRaise(True) + btn_eye.setCheckable(True) + visible = id(roi) not in self.hidden_roi_ids + btn_eye.setChecked(visible) + btn_eye.setIcon(icon_visible if visible else icon_hidden) + btn_eye.setIconSize(icon_size) + try: + btn_eye.setFixedSize(icon_size.width()+4, icon_size.height()+4) + except Exception: + pass + 
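
The overlay-label helpers above reduce to a `pg.TextItem` anchored at its bottom-left corner and repositioned just above the ROI on every region change. A minimal sketch of that mechanism:

```python
import numpy as np
import pyqtgraph as pg
from PyQt5.QtWidgets import QApplication

app = QApplication([])
view = pg.ImageView()
view.setImage(np.zeros((256, 256)))

roi = pg.RectROI([40, 60], [80, 50], pen='r')
view.addItem(roi)

label = pg.TextItem(text="ROI 1", color='w')
label.setAnchor((0, 1))        # anchor the text by its bottom-left corner
view.addItem(label)

def place_label(r):
    pos = r.pos()
    # Keep the label just above the ROI's edge, clamped to the image bounds.
    label.setPos(float(pos.x()), max(0.0, float(pos.y()) - 5.0))

place_label(roi)
roi.sigRegionChanged.connect(place_label)
view.show()
app.exec_()
```
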
btn_eye.setToolTip("Hide/Show ROI") + btn_trash = QToolButton(actions_widget) + btn_trash.setAutoRaise(True) + btn_trash.setIcon(icon_trash) + btn_trash.setIconSize(icon_size) + try: + btn_trash.setFixedSize(icon_size.width()+4, icon_size.height()+4) + except Exception: + pass + btn_trash.setToolTip("Delete ROI") + # wire actions + def on_eye_toggled(checked, r=roi, b=btn_eye): + try: + self.set_roi_visibility(r, bool(checked)) + b.setIcon(icon_visible if bool(checked) else icon_hidden) + except Exception: + pass + btn_eye.toggled.connect(on_eye_toggled) + btn_trash.clicked.connect(lambda _, r=roi: self.delete_roi(r)) + h.addWidget(btn_eye) + h.addWidget(btn_trash) + h.addStretch(1) + self.main.roi_stats_table.setCellWidget(row, 1, actions_widget) + try: + self.main.roi_stats_table.setRowHeight(row, icon_size.height()+6) + except Exception: + pass + except Exception: + pass + # set name cell (column 2) + name = self.get_roi_name(roi) + self.main.roi_stats_table.setItem(row, 2, QTableWidgetItem(name)) + return row + except Exception: + return None + + def update_stats_table_for_roi(self, roi, stats): + try: + row = self.ensure_stats_row_for_roi(roi) + if row is None: + return + # debug: log computed stats + + # keep name cell in sync (column 2) + self.main.roi_stats_table.setItem(row, 2, QTableWidgetItem(self.get_roi_name(roi))) + # fill numeric cells with xywh at the end starting column 3 + self.main.roi_stats_table.setItem(row, 3, QTableWidgetItem(f"{stats['sum']:.3f}")) + self.main.roi_stats_table.setItem(row, 4, QTableWidgetItem(f"{stats['min']:.3f}")) + self.main.roi_stats_table.setItem(row, 5, QTableWidgetItem(f"{stats['max']:.3f}")) + self.main.roi_stats_table.setItem(row, 6, QTableWidgetItem(f"{stats['mean']:.3f}")) + self.main.roi_stats_table.setItem(row, 7, QTableWidgetItem(f"{stats['std']:.3f}")) + self.main.roi_stats_table.setItem(row, 8, QTableWidgetItem(str(stats['count']))) + self.main.roi_stats_table.setItem(row, 9, QTableWidgetItem(str(stats['x']))) + self.main.roi_stats_table.setItem(row, 10, QTableWidgetItem(str(stats['y']))) + self.main.roi_stats_table.setItem(row, 11, QTableWidgetItem(str(stats['w']))) + self.main.roi_stats_table.setItem(row, 12, QTableWidgetItem(str(stats['h']))) + except Exception: + pass diff --git a/viewer/workbench/roi_math_dock.py b/viewer/workbench/roi_math_dock.py new file mode 100644 index 0000000..d9bfa75 --- /dev/null +++ b/viewer/workbench/roi_math_dock.py @@ -0,0 +1,305 @@ +#!/usr/bin/env python3 +""" +ROIMathDock: A dockable window for ROI math expressions (1D view) + +- Displays the ROI sub-image flattened to 1D (Index vs Value) +- Lets user define multiple math expressions using x (indices), y (ROI values), numpy (np), + and common numpy functions (sin, cos, log, exp, sqrt, abs, clip, where) +- Each expression renders as a separate colored curve with a legend entry +- Updates automatically when the ROI region changes or when the Workbench frame changes +""" + +from PyQt5.QtWidgets import ( + QDockWidget, QWidget, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QPushButton, + QListWidget, QListWidgetItem, QGroupBox, QMessageBox +) +from PyQt5.QtCore import Qt +import numpy as np +import pyqtgraph as pg + +class ROIMathDock(QDockWidget): + def __init__(self, parent, title: str, main_window, roi): + super().__init__(title, parent) + self.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea) + self.main = main_window + self.roi = roi + + # Container for plot + controls + container = QWidget(self) + layout = QVBoxLayout(container) + 
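
The per-row action cell built above is an ordinary widget placed into the table via `setCellWidget`: flat `QToolButton`s with QtAwesome glyphs, the eye button checkable so one handler can both swap the icon and toggle visibility. A sketch of just the eye toggle (the `fa.` prefix matches the FontAwesome-4 names used above; newer qtawesome releases use `fa5s.`-style prefixes instead):

```python
import qtawesome as qta
from PyQt5.QtCore import QSize
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QToolButton

app = QApplication([])
icon_visible = qta.icon('fa.eye', color='black')
icon_hidden = qta.icon('fa.eye-slash', color='black')

row = QWidget()
layout = QHBoxLayout(row)
layout.setContentsMargins(0, 0, 0, 0)

btn_eye = QToolButton(row)
btn_eye.setAutoRaise(True)
btn_eye.setCheckable(True)
btn_eye.setChecked(True)
btn_eye.setIcon(icon_visible)
btn_eye.setIconSize(QSize(16, 16))

def on_toggled(checked):
    # In the manager this also calls set_roi_visibility(roi, checked).
    btn_eye.setIcon(icon_visible if checked else icon_hidden)

btn_eye.toggled.connect(on_toggled)
layout.addWidget(btn_eye)
row.show()
app.exec_()
```
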
try: + layout.setContentsMargins(6, 6, 6, 6) + layout.setSpacing(6) + except Exception: + pass + + # Plot setup + self.plot_item = pg.PlotItem() + self.plot_item.setLabel('bottom', 'Index') + self.plot_item.setLabel('left', 'Value') + self.plot_widget = pg.PlotWidget(plotItem=self.plot_item) + layout.addWidget(self.plot_widget) + try: + self.plot_item.addLegend() + except Exception: + pass + + # ROI Math panel + self._setup_roi_math_panel(layout) + + # Install central widget + self.setWidget(container) + + # Internal storage for equations and their plotted items + self.math_items = {} # name -> {'expr': str, 'curve': PlotDataItem} + self._color_index = 0 + self._colors = [ + (31, 119, 180), (255, 127, 14), (44, 160, 44), (214, 39, 40), + (148, 103, 189), (140, 86, 75), (227, 119, 194), (127, 127, 127), + (188, 189, 34), (23, 190, 207) + ] + + # Compute initial ROI vector and plot base curve + self._update_base_curve() + + # Wire interactions to keep data in sync + self._wire_interactions() + + # ----- UI setup ----- + def _setup_roi_math_panel(self, parent_layout: QVBoxLayout): + gb = QGroupBox("ROI Math") + v = QVBoxLayout(gb) + + # Instructions + lbl = QLabel( + "Define expressions using x (indices), y (ROI values), and numpy (np).\n" + "Examples: y*2, np.log1p(y), (y-y.mean())/(y.std()+1e-9), clip(y,0,1000)" + ) + lbl.setWordWrap(True) + v.addWidget(lbl) + + # Input row: name + expression + add button + row = QHBoxLayout() + self.eq_name_edit = QLineEdit(); self.eq_name_edit.setPlaceholderText("Equation name (optional)") + self.eq_edit = QLineEdit(); self.eq_edit.setPlaceholderText("Enter expression e.g., np.log1p(y)") + self.btn_add = QPushButton("Add Equation") + self.btn_add.clicked.connect(self._on_add_equation) + row.addWidget(self.eq_name_edit) + row.addWidget(self.eq_edit) + row.addWidget(self.btn_add) + v.addLayout(row) + + # Buttons: recompute all, remove selected, clear all + btn_row = QHBoxLayout() + self.btn_recompute = QPushButton("Recompute & Plot All") + self.btn_recompute.clicked.connect(self._recompute_all) + self.btn_remove = QPushButton("Remove Selected") + self.btn_remove.clicked.connect(self._remove_selected) + self.btn_clear = QPushButton("Clear All") + self.btn_clear.clicked.connect(self._clear_all) + btn_row.addWidget(self.btn_recompute) + btn_row.addWidget(self.btn_remove) + btn_row.addWidget(self.btn_clear) + v.addLayout(btn_row) + + # List of equations + self.eq_list = QListWidget() + self.eq_list.itemDoubleClicked.connect(self._edit_equation_item) + v.addWidget(self.eq_list) + + parent_layout.addWidget(gb) + + # ----- Data extraction ----- + def _extract_roi_subimage(self): + """Extract the ROI sub-image for the current frame, honoring transforms via getArrayRegion.""" + frame = None + try: + frame = self.main.get_current_frame_data() + except Exception: + frame = None + if frame is None: + return None + sub = None + try: + image_item = getattr(self.main.image_view, 'imageItem', None) if hasattr(self.main, 'image_view') else None + if image_item is not None: + sub = self.roi.getArrayRegion(frame, image_item) + if sub is not None and hasattr(sub, 'ndim') and sub.ndim > 2: + sub = np.squeeze(sub) + except Exception: + sub = None + if sub is None or int(getattr(sub, 'size', 0)) == 0: + # Fallback to axis-aligned bbox + try: + pos = self.roi.pos(); size = self.roi.size() + x0 = max(0, int(pos.x())); y0 = max(0, int(pos.y())) + w = max(1, int(size.x())); h = max(1, int(size.y())) + hgt, wid = frame.shape + x1 = min(wid, x0 + w); y1 = min(hgt, y0 + h) + if x0 < x1 
and y0 < y1: + sub = frame[y0:y1, x0:x1] + except Exception: + sub = None + return sub + + def _update_base_curve(self): + sub = self._extract_roi_subimage() + if sub is None or int(getattr(sub, 'size', 0)) == 0: + # Plot an empty placeholder + self.x = np.array([0], dtype=int) + self.y = np.array([0.0], dtype=float) + else: + self.y = np.asarray(sub, dtype=np.float32).ravel() + self.x = np.arange(len(self.y)) + # Plot base ROI curve + try: + self.plot_item.clear() + try: + self.plot_item.addLegend() + except Exception: + pass + self.base_curve = self.plot_item.plot(self.x, self.y, pen=pg.mkPen(color='y', width=1.5), name='ROI') + except Exception: + # Fallback + try: + self.base_curve = self.plot_widget.plot(self.x, self.y, pen='y', clear=True) + except Exception: + pass + # Recompute math curves after base update + self._recompute_all() + + # ----- Signal wiring ----- + def _wire_interactions(self): + # ROI changes -> update base and math curves + try: + if hasattr(self.roi, 'sigRegionChanged'): + self.roi.sigRegionChanged.connect(self._update_base_curve) + if hasattr(self.roi, 'sigRegionChangeFinished'): + self.roi.sigRegionChangeFinished.connect(self._update_base_curve) + except Exception: + pass + # Frame spinbox -> update base and math curves + try: + if hasattr(self.main, 'frame_spinbox'): + self.main.frame_spinbox.valueChanged.connect(lambda _: self._update_base_curve()) + except Exception: + pass + # Log scale toggle -> update curves based on what image shows (optional) + try: + if hasattr(self.main, 'cbLogScale'): + self.main.cbLogScale.toggled.connect(lambda _: self._update_base_curve()) + except Exception: + pass + + # ----- Math engine ----- + def _next_color(self): + color = self._colors[self._color_index % len(self._colors)] + self._color_index += 1 + return pg.mkPen(color=color, width=1.5) + + def _safe_eval(self, expr: str): + """Safely evaluate an expression using restricted namespace. + Returns a numpy array of shape (N,) or a scalar. Raises on error. 
+ """ + allowed = { + 'np': np, + 'x': self.x, + 'y': self.y, + 'sin': np.sin, 'cos': np.cos, 'log': np.log, 'exp': np.exp, 'sqrt': np.sqrt, + 'abs': np.abs, 'clip': np.clip, 'where': np.where, + } + globals_dict = {'__builtins__': {}} + return eval(expr, globals_dict, allowed) + + def _plot_curve(self, name: str, y_curve): + # Convert scalar to horizontal line + if np.isscalar(y_curve): + y_curve = np.full_like(self.x, float(y_curve), dtype=float) + else: + y_curve = np.asarray(y_curve, dtype=float) + # Validate length + if y_curve.shape[0] != self.x.shape[0]: + raise ValueError(f"Expression result length {y_curve.shape[0]} does not match ROI length {self.x.shape[0]}") + # Remove old curve if re-plotting + if name in self.math_items and self.math_items[name]['curve'] is not None: + try: + self.plot_item.removeItem(self.math_items[name]['curve']) + except Exception: + pass + self.math_items[name]['curve'] = None + # Add new curve + pen = self._next_color() + curve = self.plot_item.plot(self.x, y_curve, pen=pen, name=name) + return curve + + # ----- Handlers ----- + def _on_add_equation(self): + expr = (self.eq_edit.text() or '').strip() + if not expr: + QMessageBox.information(self, "ROI Math", "Please enter an expression.") + return + name = (self.eq_name_edit.text() or '').strip() + if not name: + name = f"eq{len(self.math_items) + 1}" + try: + result = self._safe_eval(expr) + curve = self._plot_curve(name, result) + except Exception as e: + QMessageBox.critical(self, "ROI Math Error", f"Could not evaluate expression:\n{expr}\n\n{e}") + return + self.math_items[name] = {'expr': expr, 'curve': curve} + item = QListWidgetItem(f"{name}: {expr}") + item.setData(32, name) + self.eq_list.addItem(item) + self.eq_name_edit.clear() + self.eq_edit.clear() + + def _edit_equation_item(self, item: QListWidgetItem): + name = item.data(32) + if not name: + return + expr = self.math_items.get(name, {}).get('expr', '') + self.eq_name_edit.setText(name) + self.eq_edit.setText(expr) + + def _recompute_all(self): + # Recompute and update curves for all equations + for i in range(self.eq_list.count()): + item = self.eq_list.item(i) + name = item.data(32) + if not name: + continue + expr = self.math_items.get(name, {}).get('expr') + if not expr: + continue + try: + result = self._safe_eval(expr) + curve = self._plot_curve(name, result) + self.math_items[name]['curve'] = curve + except Exception as e: + QMessageBox.critical(self, "ROI Math Error", f"Error recomputing '{name}':\n{expr}\n\n{e}") + + def _remove_selected(self): + item = self.eq_list.currentItem() + if not item: + return + name = item.data(32) + try: + curve = self.math_items.get(name, {}).get('curve') + if curve is not None: + self.plot_item.removeItem(curve) + except Exception: + pass + self.math_items.pop(name, None) + row = self.eq_list.row(item) + self.eq_list.takeItem(row) + + def _clear_all(self): + for name, rec in list(self.math_items.items()): + try: + if rec.get('curve') is not None: + self.plot_item.removeItem(rec['curve']) + except Exception: + pass + self.math_items.clear() + self.eq_list.clear() diff --git a/viewer/workbench/roi_plot_dialog.py b/viewer/workbench/roi_plot_dialog.py new file mode 100644 index 0000000..b2103d9 --- /dev/null +++ b/viewer/workbench/roi_plot_dialog.py @@ -0,0 +1,257 @@ +#!/usr/bin/env python3 +""" +ROI Plot Dialog for Workbench (1D View) + +Displays a 1D graph using the same approach as the Workbench 1D viewer: +- Flattens the ROI array to a 1D vector and plots Index vs Value +- Uses PyQtGraph PlotWidget 
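
`_safe_eval` is shared verbatim between `ROIMathDock` and `ROIPlotDialog`: the expression sees only `x`, `y`, `np`, and a short whitelist of numpy functions, and the empty `__builtins__` dict blocks `open()`, `__import__()`, and friends. Note this deters accidents rather than hardening against a determined user, since `eval` is not a true sandbox. The engine in isolation:

```python
import numpy as np

def safe_eval(expr, x, y):
    """Evaluate expr with only x, y, np, and a few numpy functions visible."""
    allowed = {
        'np': np, 'x': x, 'y': y,
        'sin': np.sin, 'cos': np.cos, 'log': np.log, 'exp': np.exp,
        'sqrt': np.sqrt, 'abs': np.abs, 'clip': np.clip, 'where': np.where,
    }
    # Empty __builtins__ blocks open(), __import__(), etc. inside the expression.
    return eval(expr, {'__builtins__': {}}, allowed)

y = np.array([1.0, 4.0, 9.0])
x = np.arange(len(y))
print(safe_eval('np.sqrt(y)', x, y))                          # [1. 2. 3.]
print(safe_eval('(y - y.mean()) / (y.std() + 1e-9)', x, y))   # normalized values
```
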
with labeled axes +- Modeless dialog, does not block the main window + +Adds a ROI Math panel to define and plot arbitrary math expressions +over the ROI data. You can add multiple equations; each renders as an +additional curve with a legend entry. + +Variables available in expressions: +- x: index array (0..N-1) +- y: ROI values (flattened to 1D) +- np: numpy module +- Common numpy functions are also imported directly: sin, cos, log, exp, sqrt, abs, clip, where + +Examples: +- y * 2 +- np.log1p(y) +- (y - y.mean()) / (y.std() + 1e-9) +- clip(y, 0, 1000) +If an expression evaluates to a scalar, a horizontal line is plotted across x. +""" + +from PyQt5.QtWidgets import ( + QDialog, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QPushButton, + QListWidget, QListWidgetItem, QGroupBox, QMessageBox +) +import numpy as np +import pyqtgraph as pg + +class ROIPlotDialog(QDialog): + def __init__(self, parent, roi_image: np.ndarray): + super().__init__(parent) + self.setWindowTitle("ROI 1D Plot") + layout = QVBoxLayout(self) + + # Prepare 1D data: flatten ROI image into a vector + self.y = np.asarray(roi_image, dtype=np.float32).ravel() + self.x = np.arange(len(self.y)) + + # Create a PlotItem and PlotWidget like the 1D viewer + self.plot_item = pg.PlotItem() + self.plot_item.setLabel('bottom', 'Index') + self.plot_item.setLabel('left', 'Value') + self.plot_widget = pg.PlotWidget(plotItem=self.plot_item) + layout.addWidget(self.plot_widget) + + # Add legend for multiple curves + try: + self.plot_item.addLegend() + except Exception: + pass + + # Plot the base ROI 1D data + try: + self.base_curve = self.plot_item.plot(self.x, self.y, pen=pg.mkPen(color='y', width=1.5), name='ROI') + except Exception: + # Fallback: simple PlotWidget plot + self.base_curve = self.plot_widget.plot(self.x, self.y, pen='y') + + # ROI Math group + self._setup_roi_math_panel(layout) + + # Internal storage for equations and their plotted items + self.math_items = {} # name -> {'expr': str, 'curve': PlotDataItem} + self._color_index = 0 + self._colors = [ + (31, 119, 180), (255, 127, 14), (44, 160, 44), (214, 39, 40), + (148, 103, 189), (140, 86, 75), (227, 119, 194), (127, 127, 127), + (188, 189, 34), (23, 190, 207) + ] + + def _setup_roi_math_panel(self, parent_layout: QVBoxLayout): + gb = QGroupBox("ROI Math") + v = QVBoxLayout(gb) + + # Instructions + lbl = QLabel( + "Define expressions using x (indices), y (ROI values), and numpy (np).\n" + "Examples: y*2, np.log1p(y), (y-y.mean())/(y.std()+1e-9), clip(y,0,1000)" + ) + lbl.setWordWrap(True) + v.addWidget(lbl) + + # Input row: name + expression + add button + row = QHBoxLayout() + self.eq_name_edit = QLineEdit(); self.eq_name_edit.setPlaceholderText("Equation name (optional)") + self.eq_edit = QLineEdit(); self.eq_edit.setPlaceholderText("Enter expression e.g., np.log1p(y)") + self.btn_add = QPushButton("Add Equation") + self.btn_add.clicked.connect(self._on_add_equation) + row.addWidget(self.eq_name_edit) + row.addWidget(self.eq_edit) + row.addWidget(self.btn_add) + v.addLayout(row) + + # Buttons: recompute all, remove selected, clear all + btn_row = QHBoxLayout() + self.btn_recompute = QPushButton("Recompute & Plot All") + self.btn_recompute.clicked.connect(self._recompute_all) + self.btn_remove = QPushButton("Remove Selected") + self.btn_remove.clicked.connect(self._remove_selected) + self.btn_clear = QPushButton("Clear All") + self.btn_clear.clicked.connect(self._clear_all) + btn_row.addWidget(self.btn_recompute) + btn_row.addWidget(self.btn_remove) + 
btn_row.addWidget(self.btn_clear) + v.addLayout(btn_row) + + # List of equations + self.eq_list = QListWidget() + self.eq_list.itemDoubleClicked.connect(self._edit_equation_item) + v.addWidget(self.eq_list) + + parent_layout.addWidget(gb) + + def _next_color(self): + color = self._colors[self._color_index % len(self._colors)] + self._color_index += 1 + return pg.mkPen(color=color, width=1.5) + + def _safe_eval(self, expr: str): + """Evaluate an expression in a restricted namespace (best effort). + Returns a numpy array of shape (N,) or a scalar. Raises on error. + Note: stripping __builtins__ narrows what an expression can reach, but + eval() is not a true sandbox; expressions should come from a trusted user. + """ + # Allowed names + allowed = { + 'np': np, + 'x': self.x, + 'y': self.y, + # Common numpy functions directly for convenience + 'sin': np.sin, 'cos': np.cos, 'log': np.log, 'exp': np.exp, 'sqrt': np.sqrt, + 'abs': np.abs, 'clip': np.clip, 'where': np.where, + } + # No builtins + globals_dict = {'__builtins__': {}} + return eval(expr, globals_dict, allowed) + + def _plot_curve(self, name: str, y_curve): + # Convert scalar to horizontal line + if np.isscalar(y_curve): + y_curve = np.full_like(self.x, float(y_curve), dtype=float) + else: + y_curve = np.asarray(y_curve, dtype=float) + + # Validate length + if y_curve.shape[0] != self.x.shape[0]: + raise ValueError(f"Expression result length {y_curve.shape[0]} does not match ROI length {self.x.shape[0]}") + + # Remove old curve if re-plotting + if name in self.math_items and self.math_items[name]['curve'] is not None: + try: + self.plot_item.removeItem(self.math_items[name]['curve']) + except Exception: + pass + self.math_items[name]['curve'] = None + + # Add new curve + pen = self._next_color() + curve = self.plot_item.plot(self.x, y_curve, pen=pen, name=name) + return curve + + def _on_add_equation(self): + expr = (self.eq_edit.text() or '').strip() + if not expr: + QMessageBox.information(self, "ROI Math", "Please enter an expression.") + return + name = (self.eq_name_edit.text() or '').strip() + if not name: + # Derive a default name + name = f"eq{len(self.math_items) + 1}" + + # Evaluate and plot + try: + result = self._safe_eval(expr) + curve = self._plot_curve(name, result) + except Exception as e: + QMessageBox.critical(self, "ROI Math Error", f"Could not evaluate expression:\n{expr}\n\n{e}") + return + + # Store and list + self.math_items[name] = {'expr': expr, 'curve': curve} + item = QListWidgetItem(f"{name}: {expr}") + item.setData(32, name) # store name under a custom int role (note: Qt.UserRole itself is 256, not 32) + self.eq_list.addItem(item) + # Clear inputs + self.eq_name_edit.clear() + self.eq_edit.clear() + + def _edit_equation_item(self, item: QListWidgetItem): + # Simple inline edit by reusing the input boxes: load values into the edits + name = item.data(32) + if not name: + return + expr = self.math_items.get(name, {}).get('expr', '') + self.eq_name_edit.setText(name) + self.eq_edit.setText(expr) + + def _recompute_all(self): + # Recompute and update curves for all equations + for i in range(self.eq_list.count()): + item = self.eq_list.item(i) + name = item.data(32) + if not name: + continue + expr = self.math_items.get(name, {}).get('expr') + if not expr: + continue + try: + result = self._safe_eval(expr) + curve = self._plot_curve(name, result) + self.math_items[name]['curve'] = curve + except Exception as e: + QMessageBox.critical(self, "ROI Math Error", f"Error recomputing '{name}':\n{expr}\n\n{e}") + + def _remove_selected(self): + item = self.eq_list.currentItem() + if not item: + return + name = item.data(32) + # Remove curve + try: + curve = self.math_items.get(name, {}).get('curve') + if curve is not
None: + self.plot_item.removeItem(curve) + except Exception: + pass + # Remove from storage and list + self.math_items.pop(name, None) + row = self.eq_list.row(item) + self.eq_list.takeItem(row) + + def _clear_all(self): + # Remove all curves + for name, rec in list(self.math_items.items()): + try: + if rec.get('curve') is not None: + self.plot_item.removeItem(rec['curve']) + except Exception: + pass + self.math_items.clear() + self.eq_list.clear() + + # Optional: method to update ROI data and recompute curves (future use) + def update_roi_data(self, roi_image: np.ndarray): + self.y = np.asarray(roi_image, dtype=np.float32).ravel() + self.x = np.arange(len(self.y)) + # Update base curve + try: + self.base_curve.setData(self.x, self.y) + except Exception: + pass + # Recompute all math curves + self._recompute_all() diff --git a/viewer/workbench/roi_plot_dock.py b/viewer/workbench/roi_plot_dock.py new file mode 100644 index 0000000..7de7b9b --- /dev/null +++ b/viewer/workbench/roi_plot_dock.py @@ -0,0 +1,447 @@ +#!/usr/bin/env python3 +""" +ROI Plot Dock for Workbench (Configurable X/Y Metrics) + +Provides a QDockWidget that displays a 1D plot of an ROI metric across frames. +You can choose what the X and Y axes represent via dropdowns: time, sum, min, +max, std. Includes a slider and an interactive vertical line to scrub frames; +stays in sync with the Workbench's frame spinbox. +""" + +from PyQt5.QtWidgets import ( + QDockWidget, QWidget, QVBoxLayout, QHBoxLayout, QSlider, QLabel, QComboBox +) +from PyQt5.QtCore import Qt +import numpy as np +import pyqtgraph as pg + +METRIC_OPTIONS = ["time", "sum", "min", "max", "std"] +AXIS_LABELS = { + "time": "Time (Frame Index)", + "sum": "ROI Sum", + "min": "ROI Min", + "max": "ROI Max", + "std": "ROI Std", +} + +class ROIPlotDock(QDockWidget): + def __init__(self, parent, title: str, main_window, roi): + super().__init__(title, parent) + self.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea) + self.main = main_window + self.roi = roi + # Ensure title starts with latest ROI name + try: + if hasattr(self.main, 'get_roi_name'): + self._update_title() + except Exception: + pass + + # Container for controls + plot + slider + container = QWidget(self) + layout = QVBoxLayout(container) + layout.setContentsMargins(6, 6, 6, 6) + layout.setSpacing(6) + + # Stats label above plot + self.stats_label = QLabel("ROI Stats: -") + try: + self.stats_label.setStyleSheet("color: #2c3e50; font-size: 11px;") + except Exception: + pass + layout.addWidget(self.stats_label) + + # Axis selection controls + controls_row = QHBoxLayout() + lbl_x = QLabel("X:"); lbl_y = QLabel("Y:") + self.x_select = QComboBox(); self.x_select.addItems(METRIC_OPTIONS) + self.y_select = QComboBox(); self.y_select.addItems(METRIC_OPTIONS) + # Defaults + try: + self.x_select.setCurrentText("time") + except Exception: + pass + try: + self.y_select.setCurrentText("sum") + except Exception: + pass + controls_row.addWidget(lbl_x) + controls_row.addWidget(self.x_select) + controls_row.addSpacing(12) + controls_row.addWidget(lbl_y) + controls_row.addWidget(self.y_select) + layout.addLayout(controls_row) + + # Plot setup + self.plot_item = pg.PlotItem() + self.plot_item.setLabel('bottom', AXIS_LABELS.get('time', 'Time')) + self.plot_item.setLabel('left', AXIS_LABELS.get('sum', 'ROI Sum')) + self.plot_widget = pg.PlotWidget(plotItem=self.plot_item) + layout.addWidget(self.plot_widget) + + # Vertical line to indicate current frame (positioned in X-space) + self.frame_line = 
pg.InfiniteLine(angle=90, movable=True, pen='c') + try: + self.plot_item.addItem(self.frame_line) + except Exception: + pass + + # Slider for scrubbing frames + self.slider = QSlider(Qt.Horizontal) + layout.addWidget(self.slider) + + self.setWidget(container) + + # Storage for series metrics + self.series = {m: np.array([0.0], dtype=float) for m in METRIC_OPTIONS} + + # Compute initial series and wire interactions + self._compute_time_series() + self._wire_interactions() + # Initial labels + self._update_axis_labels() + + def _get_roi_bounds(self): + """Return integer ROI bounds (x0, y0, w, h) based on ROI position/size.""" + try: + pos = self.roi.pos(); size = self.roi.size() + x0 = max(0, int(pos.x())); y0 = max(0, int(pos.y())) + w = max(1, int(size.x())); h = max(1, int(size.y())) + return x0, y0, w, h + except Exception: + return 0, 0, 1, 1 + + def _extract_roi_sub(self, frame, image_item): + """Extract ROI subarray from frame, respecting transforms if possible.""" + sub = None + # Try transform-aware extraction + try: + if image_item is not None: + sub = self.roi.getArrayRegion(frame, image_item) + if sub is not None and hasattr(sub, 'ndim') and sub.ndim > 2: + sub = np.squeeze(sub) + except Exception: + sub = None + # Fallback to axis-aligned bounding box + if sub is None or int(getattr(sub, 'size', 0)) == 0: + x0, y0, w, h = self._get_roi_bounds() + hgt, wid = frame.shape + x1 = min(wid, x0 + w); y1 = min(hgt, y0 + h) + if x0 < x1 and y0 < y1: + sub = frame[y0:y1, x0:x1] + else: + sub = None + return sub + + def _compute_time_series(self): + """Compute per-frame ROI metrics: sum, min, max, std, and time (index).""" + data = getattr(self.main, 'current_2d_data', None) + if data is None or not isinstance(data, np.ndarray): + # No data + self.series = {m: np.array([0.0], dtype=float) for m in METRIC_OPTIONS} + self.series['time'] = np.array([0], dtype=int) + self.slider.setEnabled(False) + self._update_stats_label() + self._update_plot() + return + + image_item = getattr(self.main.image_view, 'imageItem', None) if hasattr(self.main, 'image_view') else None + + if data.ndim == 3: + num_frames = data.shape[0] + sums, mins, maxs, stds = [], [], [], [] + for i in range(num_frames): + frame = np.asarray(data[i], dtype=np.float32) + sub = self._extract_roi_sub(frame, image_item) + if sub is not None and int(getattr(sub, 'size', 0)) > 0: + s = float(np.sum(sub)) + mn = float(np.min(sub)) + mx = float(np.max(sub)) + sd = float(np.std(sub)) + else: + s = 0.0; mn = 0.0; mx = 0.0; sd = 0.0 + sums.append(s); mins.append(mn); maxs.append(mx); stds.append(sd) + self.series = { + 'time': np.arange(num_frames, dtype=int), + 'sum': np.asarray(sums, dtype=float), + 'min': np.asarray(mins, dtype=float), + 'max': np.asarray(maxs, dtype=float), + 'std': np.asarray(stds, dtype=float), + } + self.slider.setEnabled(True) + try: + self.slider.setMinimum(0) + self.slider.setMaximum(max(num_frames - 1, 0)) + cur = 0 + if hasattr(self.main, 'frame_spinbox') and self.main.frame_spinbox.isEnabled(): + try: + cur = int(self.main.frame_spinbox.value()) + except Exception: + cur = 0 + self.slider.setValue(cur) + except Exception: + pass + else: + # 2D image -> single point (frame 0) + frame = np.asarray(data, dtype=np.float32) + sub = self._extract_roi_sub(frame, image_item) + if sub is not None and int(getattr(sub, 'size', 0)) > 0: + s = float(np.sum(sub)) + mn = float(np.min(sub)) + mx = float(np.max(sub)) + sd = float(np.std(sub)) + else: + s = 0.0; mn = 0.0; mx = 0.0; sd = 0.0 + self.series = { + 'time': 
np.array([0], dtype=int), + 'sum': np.array([s], dtype=float), + 'min': np.array([mn], dtype=float), + 'max': np.array([mx], dtype=float), + 'std': np.array([sd], dtype=float), + } + self.slider.setEnabled(False) + self._update_plot() + + def _update_axis_labels(self): + try: + x_name = self.x_select.currentText() + except Exception: + x_name = 'time' + try: + y_name = self.y_select.currentText() + except Exception: + y_name = 'sum' + try: + self.plot_item.setLabel('bottom', AXIS_LABELS.get(x_name, x_name)) + except Exception: + pass + try: + self.plot_item.setLabel('left', AXIS_LABELS.get(y_name, y_name)) + except Exception: + pass + + def _update_stats_label(self): + try: + # Refresh dock title to keep in sync with ROI name changes + try: + self._update_title() + except Exception: + pass + frame = None + try: + frame = self.main.get_current_frame_data() + except Exception: + frame = None + stats = None + if frame is not None and hasattr(self.main, 'roi_manager'): + try: + stats = self.main.roi_manager.compute_roi_stats(frame, self.roi) + except Exception: + stats = None + if stats: + text = (f"ROI [{stats['x']},{stats['y']} {stats['w']}x{stats['h']}] | " + f"sum={stats['sum']:.3f} min={stats['min']:.3f} max={stats['max']:.3f} " + f"mean={stats['mean']:.3f} std={stats['std']:.3f} count={stats['count']}") + else: + text = "ROI Stats: -" + try: + # Ensure label shows a consistent prefix + if text.startswith("ROI ["): + self.stats_label.setText(text) + else: + self.stats_label.setText(f"ROI Stats: {text}") + except Exception: + pass + except Exception: + pass + + def _update_plot(self): + try: + # Choose data by selection + try: + x_sel = self.x_select.currentText() + except Exception: + x_sel = 'time' + try: + y_sel = self.y_select.currentText() + except Exception: + y_sel = 'sum' + x_data = np.asarray(self.series.get(x_sel, self.series.get('time')), dtype=float) + y_data = np.asarray(self.series.get(y_sel, self.series.get('sum')), dtype=float) + + self.plot_item.clear() + self.plot_item.plot(x_data, y_data, pen='y') + # Re-add vertical line after clear + try: + self.plot_item.addItem(self.frame_line) + except Exception: + pass + # Position frame line to current frame value in x-space + cur = 0 + if hasattr(self.main, 'frame_spinbox') and self.main.frame_spinbox.isEnabled(): + try: + cur = int(self.main.frame_spinbox.value()) + except Exception: + cur = 0 + try: + if x_sel == 'time': + self.frame_line.setPos(cur) + else: + # Guard against index out of bounds + idx = np.clip(cur, 0, len(x_data) - 1) + self.frame_line.setPos(float(x_data[idx])) + except Exception: + pass + # Update axis labels + self._update_axis_labels() + except Exception: + # Fallback: simple plot call + try: + self.plot_widget.plot(self.series.get('time'), self.series.get('sum'), pen='y', clear=True) + except Exception: + pass + + def _update_title(self): + try: + name = None + try: + if hasattr(self.main, 'get_roi_name'): + name = self.main.get_roi_name(self.roi) + except Exception: + name = None + if not name: + name = 'ROI' + try: + self.setWindowTitle(f"ROI: {name}") + except Exception: + pass + except Exception: + pass + + def _wire_interactions(self): + # Slider -> change Workbench frame + try: + self.slider.valueChanged.connect(self._on_slider_changed) + except Exception: + pass + # Frame spinbox -> update slider and line + try: + if hasattr(self.main, 'frame_spinbox'): + self.main.frame_spinbox.valueChanged.connect(self._on_frame_spinbox_changed) + except Exception: + pass + # ROI changes -> recompute series + 
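# Note: pyqtgraph ROIs emit sigRegionChanged continuously while being dragged and sigRegionChangeFinished once on release; wiring both keeps the series live during a drag. A cheaper variant (a sketch, not what this dock does) would recompute only on release: + #     self.roi.sigRegionChangeFinished.connect(self._compute_time_series) +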
try: + if hasattr(self.roi, 'sigRegionChanged'): + self.roi.sigRegionChanged.connect(lambda: (self._compute_time_series(), self._update_stats_label())) + if hasattr(self.roi, 'sigRegionChangeFinished'): + self.roi.sigRegionChangeFinished.connect(lambda: (self._compute_time_series(), self._update_stats_label())) + except Exception: + pass + # Dragging the vertical line should also update frame + try: + self.frame_line.sigPositionChanged.connect(self._on_line_moved) + except Exception: + pass + # Axis selection changes -> replot + try: + self.x_select.currentTextChanged.connect(lambda _: self._update_plot()) + self.y_select.currentTextChanged.connect(lambda _: self._update_plot()) + except Exception: + pass + + def _on_slider_changed(self, value): + # Update Workbench frame and vertical line + try: + if hasattr(self.main, 'frame_spinbox') and self.main.frame_spinbox.isEnabled(): + self.main.frame_spinbox.setValue(int(value)) + except Exception: + pass + try: + # Update line position in current x-space + x_sel = self.x_select.currentText() if hasattr(self, 'x_select') else 'time' + if x_sel == 'time': + self.frame_line.setPos(int(value)) + else: + x_data = np.asarray(self.series.get(x_sel, self.series.get('time')), dtype=float) + idx = np.clip(int(value), 0, len(x_data) - 1) + self.frame_line.setPos(float(x_data[idx])) + except Exception: + pass + try: + self._update_stats_label() + except Exception: + pass + + def _on_frame_spinbox_changed(self, value): + # Keep slider and line in sync with Workbench + try: + self.slider.blockSignals(True) + self.slider.setValue(int(value)) + except Exception: + pass + try: + # Update line position in current x-space + x_sel = self.x_select.currentText() if hasattr(self, 'x_select') else 'time' + if x_sel == 'time': + self.frame_line.setPos(int(value)) + else: + x_data = np.asarray(self.series.get(x_sel, self.series.get('time')), dtype=float) + idx = np.clip(int(value), 0, len(x_data) - 1) + self.frame_line.setPos(float(x_data[idx])) + except Exception: + pass + try: + self.slider.blockSignals(False) + except Exception: + pass + try: + self._update_stats_label() + except Exception: + pass + + def _on_line_moved(self): + # When line is dragged, snap to nearest value and update frame + try: + pos_val = float(self.frame_line.value()) + except Exception: + pos_val = 0.0 + # Determine new frame index from x-space + try: + x_sel = self.x_select.currentText() if hasattr(self, 'x_select') else 'time' + except Exception: + x_sel = 'time' + try: + if x_sel == 'time': + pos = int(round(pos_val)) + else: + x_data = np.asarray(self.series.get(x_sel, self.series.get('time')), dtype=float) + # Find nearest frame index by metric value + if len(x_data) == 0: + pos = 0 + else: + pos = int(np.argmin(np.abs(x_data - pos_val))) + # Clamp + if hasattr(self.main, 'frame_spinbox') and self.main.frame_spinbox.isEnabled(): + max_idx = int(self.slider.maximum()) if hasattr(self.slider, 'maximum') else len(self.series.get('time', [])) - 1 + pos = int(np.clip(pos, 0, max_idx)) + except Exception: + pos = 0 + try: + self.frame_line.blockSignals(True) + # Reposition line to exact x-space of selected frame + x_sel = self.x_select.currentText() if hasattr(self, 'x_select') else 'time' + if x_sel == 'time': + self.frame_line.setPos(int(pos)) + else: + x_data = np.asarray(self.series.get(x_sel, self.series.get('time')), dtype=float) + idx = np.clip(int(pos), 0, len(x_data) - 1) + self.frame_line.setPos(float(x_data[idx])) + self.frame_line.blockSignals(False) + except Exception: + pass + 
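# blockSignals above keeps setPos() from re-emitting sigPositionChanged and recursing into this handler; the slider handler below then pushes the snapped frame index to the Workbench spinbox. +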
self._on_slider_changed(pos) + try: + self._update_stats_label() + except Exception: + pass diff --git a/viewer/workbench/tabs/__init__.py b/viewer/workbench/tabs/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/viewer/workbench/tabs/base_tab.py b/viewer/workbench/tabs/base_tab.py new file mode 100644 index 0000000..7230938 --- /dev/null +++ b/viewer/workbench/tabs/base_tab.py @@ -0,0 +1,52 @@ +from PyQt5.QtWidgets import QWidget +from PyQt5 import uic + +class BaseTab(QWidget): + """ + Base class for all tabs in the Workbench. + Provides common functionality and a consistent interface. + """ + def __init__(self, ui_file, parent=None, main_window=None, title=""): + super().__init__(parent) + self.main_window = main_window + self.title = title + uic.loadUi(ui_file, self) + self.setObjectName(self.__class__.__name__) # Set object name for easier identification + self.setup() + + def setup(self): + self.main_window.tabWidget_analysis.addTab(self, self.title) + + def on_tab_selected(self): + """ + Called when this tab is selected. + Can be overridden by subclasses to perform tab-specific actions. + """ + pass + + def on_tab_deselected(self): + """ + Called when this tab is deselected. + Can be overridden by subclasses to perform tab-specific actions. + """ + pass + + def update_data(self, data_path: str): + """ + Called when new data is loaded or the selected dataset changes. + Subclasses should implement this to update their display. + """ + pass + + def clear_data(self): + """ + Called when the HDF5 file is closed or cleared. + Subclasses should implement this to clear their display. + """ + pass + + def get_tab_name(self) -> str: + """ + Returns the display name for the tab. + """ + return self.__class__.__name__.replace('Tab', '') # Default to class name without 'Tab' diff --git a/viewer/workbench/tabs/workspace_3d.py b/viewer/workbench/tabs/workspace_3d.py new file mode 100644 index 0000000..ba73871 --- /dev/null +++ b/viewer/workbench/tabs/workspace_3d.py @@ -0,0 +1,1057 @@ +from typing import Optional +import os +from PyQt5.QtWidgets import QDockWidget, QWidget, QVBoxLayout, QLabel, QMessageBox, QFileDialog, QSizePolicy +from PyQt5.QtCore import Qt, QThread +import numpy as np +import pyvista as pv + + +# Import BaseTab using existing tabs package alias +from .base_tab import BaseTab + +# Import 3D visualization components +try: + import pyvista as pyv + from pyvistaqt import QtInteractor + PYVISTA_AVAILABLE = True +except ImportError: + PYVISTA_AVAILABLE = False + +# Worker for off-UI-thread 3D prep +from viewer.workbench.workers import Render3D +from utils.hdf5_loader import HDF5Loader, discover_hkl_axis_labels +from utils.rsm_converter import RSMConverter + +class Workspace3D(BaseTab): + """ + 3D Tab encapsulating 3D viewer setup, loading, and plotting operations. + Delegates UI widget access via main_window, but centralizes 3D actions here. 
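+ + Construction sketch (assumes the Workbench main window exposes tabWidget_analysis, which BaseTab.setup uses to register the tab): + tab = Workspace3D(parent=workbench, main_window=workbench)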
+ """ + def __init__(self, parent=None, main_window=None, title="3D View"): + pv.set_plot_theme('dark') + try: + super().__init__(ui_file='gui/workbench/tabs/tab_3d.ui', parent=parent, main_window=main_window, title=title) + self.title = title + self.main_window = main_window + self.build() + self.connect_all() + except Exception as e: + print(e) + + def connect_all(self): + """Wire up 3D controls to main window handlers.""" + try: + self.btn_load_3d_data.clicked.connect(self.load_data) + self.cb_colormap_3d.currentTextChanged.connect(self.on_3d_colormap_changed) + self.cb_show_points.toggled.connect(self.toggle_3d_points) + self.cb_show_slice.toggled.connect(self.toggle_3d_slice) + self.sb_min_intensity_3d.editingFinished.connect(self.update_intensity) + self.sb_max_intensity_3d.editingFinished.connect(self.update_intensity) + except Exception as e: + try: + self.main_window.update_status(f"Error setting up 3D connections: {e}") + except Exception: + pass + + def build(self): + # Try to create VTK QtInteractor; fall back if unavailable + try: + self.plotter = QtInteractor(self) + except Exception: + self.plotter = None + + self.hkl_info_label = None + + if self.plotter is None: + placeholder = QLabel("3D (VTK) unavailable in tunnel mode.") + try: + placeholder.setAlignment(Qt.AlignCenter) + except Exception: + pass + try: + placeholder.setWordWrap(True) + except Exception: + pass + try: + self.container.insertWidget(1, placeholder, stretch=1) + except Exception: + pass + # Disable 3D controls that would require the plotter + for w in [getattr(self, "btn_load_3d_data", None), + getattr(self, "cb_show_points", None), + getattr(self, "cb_show_slice", None), + getattr(self, "sb_min_intensity_3d", None), + getattr(self, "sb_max_intensity_3d", None)]: + try: + if w is not None: + w.setEnabled(False) + except Exception: + pass + # Initialize defaults + self.cloud_mesh_3d = None + self.slab_actor = None + self.plane_widget = None + self.lut = None + self.lut2 = None + # Default target raster shape (HxW) for slice rasterization + self.orig_shape = (0, 0) + self.curr_shape = (0, 0) + # Slice & Camera defaults + self._slice_translate_step = 0.01 + self._slice_rotate_step_deg = 1.0 + self._custom_normal = np.array([0.0, 0.0, 1.0], dtype=float) + self._zoom_step = 1.5 + return + + # If plotter exists, proceed to embed and configure + self.container.insertWidget(1, self.plotter, stretch=1) + try: + self.scrollArea_3d_controls.setMinimumWidth(280) + except Exception: + pass + try: + self.plotter.add_axes(xlabel='H', ylabel='K', zlabel='L', x_color='red', y_color='green', z_color='blue') + except Exception: + pass + + # Color axes: H=red (X), K=green (Y), L=blue (Z) + try: + ca = getattr(self.plotter.renderer, 'cube_axes_actor', None) + if ca: + ca.GetXAxesLinesProperty().SetColor(1.0, 0.0, 0.0) + ca.GetYAxesLinesProperty().SetColor(0.0, 1.0, 0.0) + ca.GetZAxesLinesProperty().SetColor(0.0, 0.0, 1.0) + ca.GetTitleTextProperty(0).SetColor(1.0, 0.0, 0.0) + ca.GetTitleTextProperty(1).SetColor(0.0, 1.0, 0.0) + ca.GetTitleTextProperty(2).SetColor(0.0, 0.0, 1.0) + ca.GetLabelTextProperty(0).SetColor(1.0, 0.0, 0.0) + ca.GetLabelTextProperty(1).SetColor(0.0, 1.0, 0.0) + ca.GetLabelTextProperty(2).SetColor(0.0, 0.0, 1.0) + except Exception: + pass + self.cloud_mesh_3d = None + self.slab_actor = None + self.plane_widget = None + # Initialize LUTs similar to viewer/hkl_3d.py + try: + self.lut = pv.LookupTable(cmap='jet') + self.lut.apply_opacity([0, 1]) + self.lut2 = pv.LookupTable(cmap='jet') + 
self.lut2.apply_opacity([0, 1]) + except Exception: + self.lut = None + self.lut2 = None + # Default target raster shape (HxW) for slice rasterization + self.orig_shape = (0, 0) + self.curr_shape = (0, 0) + # Slice & Camera defaults + self._slice_translate_step = 0.01 + self._slice_rotate_step_deg = 1.0 + self._custom_normal = np.array([0.0, 0.0, 1.0], dtype=float) + self._zoom_step = 1.5 + # Cached true data intensity bounds (set on data load) + self._data_intensity_min = None + self._data_intensity_max = None + + + def setup_plot_viewer(self): + """ + Create and embed a PyVista QtInteractor into the 3D tab container. + """ + mw = self.main_window + try: + if not PYVISTA_AVAILABLE: + return + pyv.set_plot_theme('dark') + mw.plotter_3d = QtInteractor() + mw.plotter_3d.add_axes(xlabel='H', ylabel='K', zlabel='L', x_color='red', y_color='green', z_color='blue') + if hasattr(mw, 'layout3DPlotHost') and mw.layout3DPlotHost is not None: + try: + # layout3DPlotHost may be a grid layout from the UI + mw.layout3DPlotHost.addWidget(mw.plotter_3d, 0, 0) + except Exception: + mw.layout3DPlotHost.addWidget(mw.plotter_3d) + else: + print("Warning: layout3DPlotHost not found, 3D plot may not display correctly") + # Info dock + try: + self._setup_info_dock() + except Exception: + pass + # Clear initial state + self.clear_plot() + except Exception as e: + try: + mw.update_status(f"Error setting up 3D plot viewer: {e}") + except Exception: + pass + + def _setup_info_dock(self): + """Create a small 3D Info dock to display render metrics (e.g., render time).""" + mw = self.main_window + try: + mw.three_d_info_dock = QDockWidget("3D Info", mw) + mw.three_d_info_dock.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea) + container = QWidget(mw.three_d_info_dock) + layout = QVBoxLayout(container) + mw.three_d_info_label = QLabel("Render time: - ms") + layout.addWidget(mw.three_d_info_label) + mw.three_d_info_dock.setWidget(container) + mw.addDockWidget(Qt.RightDockWidgetArea, mw.three_d_info_dock) + try: + mw.add_dock_toggle_action(mw.three_d_info_dock, "3D Info", segment_name="3d") + except Exception: + pass + except Exception as e: + try: + mw.update_status(f"Error setting up 3D info dock: {e}") + except Exception: + pass + + def toggle_3d_points(self, checked: bool): + """Shows/Hides the main HKL point cloud.""" + try: + # Support either actor name used by different paths + actor = None + if "points" in getattr(self.plotter, 'actors', {}): + actor = self.plotter.actors.get("points") + elif "cloud_volume" in getattr(self.plotter, 'actors', {}): + actor = self.plotter.actors.get("cloud_volume") + if actor is not None: + actor.SetVisibility(bool(checked)) + self.plotter.render() + except Exception: + pass + + def toggle_3d_slice(self, checked: bool): + """Shows/Hides the interactive plane and the extracted slice points.""" + try: + # Toggle the points extracted by the plane + if "slab_points" in getattr(self.plotter, 'actors', {}): + try: + self.plotter.actors["slab_points"].SetVisibility(bool(checked)) + except Exception: + try: + self.plotter.renderer._actors["slab_points"].SetVisibility(bool(checked)) + except Exception: + pass + + # Toggle the interactive plane widget tool + if self.plane_widget is not None: + try: + if checked: + # Try both enable methods to support different versions + try: + self.plane_widget.EnabledOn() + except Exception: + self.plane_widget.On() + else: + try: + self.plane_widget.EnabledOff() + except Exception: + self.plane_widget.Off() + except Exception: + pass + else: + 
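# self.plane_widget can be unset when the widget was created directly by the plotter rather than cached on this tab. +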
# Fallback: use plotter.plane_widgets list if available + widgets = getattr(self.plotter, "plane_widgets", []) + for pw in widgets or []: + try: + if checked: + pw.EnabledOn() + else: + pw.EnabledOff() + except Exception: + pass + + self.plotter.render() + except Exception: + pass + + def on_3d_colormap_changed(self): + """Apply selected colormap to the points (and slab if available).""" + try: + cmap_name = getattr(self.cb_colormap_3d, 'currentText', lambda: 'viridis')() + except Exception: + cmap_name = 'viridis' + # Primary LUT used for the main cloud volume/points + try: + self.lut = pv.LookupTable(cmap=cmap_name) + self.lut.apply_opacity([0, 1]) + self.lut2 = pv.LookupTable(cmap=cmap_name) + self.lut2.apply_opacity([0, 1]) + except Exception: + self.lut = None + self.lut2 = None + + # Update points/cloud actor by changing the mapper's lookup table (no re-add) + try: + actors = getattr(self.plotter, 'actors', {}) or {} + tgt_name = 'points' if 'points' in actors else ('cloud_volume' if 'cloud_volume' in actors else None) + if tgt_name and self.lut is not None: + actor = actors.get(tgt_name) + try: + # Prefer direct mapper property + actor.mapper.lookup_table = self.lut + except Exception: + try: + actor.GetMapper().SetLookupTable(self.lut) + except Exception: + pass + # Maintain scalar range and visibility + try: + rng = [self.sb_min_intensity_3d.value(), self.sb_max_intensity_3d.value()] + actor.mapper.scalar_range = rng + except Exception: + pass + try: + actor.SetVisibility(bool(self.cb_show_points.isChecked())) + except Exception: + pass + except Exception: + pass + + # Attempt to update slab colormap (best-effort) using a separate LUT + try: + if self.slab_actor is not None: + try: + try: + # Prefer self.lut2 if available + self.slab_actor.mapper.lookup_table = (self.lut2 or self.lut) + except Exception: + try: + self.slab_actor.GetMapper().SetLookupTable(self.lut2 or self.lut) + except Exception: + pass + except Exception: + pass + # Keep visibility consistent + try: + self.slab_actor.SetVisibility(bool(self.cb_show_slice.isChecked())) + except Exception: + pass + except Exception: + pass + + # Render and ensure ranges/visibility remain in sync + try: + # Update existing scalar bars with the new LUT + if hasattr(self.plotter, 'scalar_bars') and self.lut is not None: + for _, scalar_bar in self.plotter.scalar_bars.items(): + try: + scalar_bar.SetLookupTable(self.lut) + scalar_bar.Modified() + except Exception: + pass + self.plotter.render() + except Exception: + pass + try: + self.update_intensity() + except Exception: + pass + + def update_info(self, render_ms: int): + """Update the 3D info dock with render timing in milliseconds.""" + mw = self.main_window + try: + if hasattr(mw, 'three_d_info_label') and mw.three_d_info_label is not None: + mw.three_d_info_label.setText(f"Render time: {int(render_ms)} ms") + except Exception: + pass + + # === Clear === + def clear_plot(self): + try: + if hasattr(self, 'plotter') and self.plotter is not None: + self.plotter.clear() + self.current_3d_data = None + self.mesh = None + except Exception as e: + try: + self.main_window.update_status(f"Error clearing 3D plot: {e}") + except Exception: + pass + + # === Loading & Plotting === + def load_data(self): + """Load dataset and render using the tab's local plotter.""" + print("Loading data into 3D viewer...") + mw = self.main_window + import time as _time + start_all = _time.perf_counter() + + try: + if not PYVISTA_AVAILABLE: + QMessageBox.warning(self, "3D Viewer", "PyVista is not available.") 
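+ # PYVISTA_AVAILABLE is resolved at import time by the try/except around the pyvista/pyvistaqt imports above; bail out early here instead of failing deep inside VTK.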
+ return + + # 1. Get the file path + file_path = getattr(mw, 'current_file_path', None) or getattr(mw, 'selected_dataset_path', None) + if not file_path: + file_name, _ = QFileDialog.getOpenFileName( + self, 'Select HDF5 or VTI File', '', 'HDF5 Files (*.h5 *.hdf5 *.vti);;All Files (*)' + ) + if not file_name: return + file_path = file_name + conv = RSMConverter() + # 2. Load the raw data + # if the data is uncompressed + data = conv.load_h5_to_3d(file_path) + points, intensities, num_images, shape = data + + # 3. Define what happens when the worker finishes processing + def _on_ready(): + try: + # IMPORTANT: Tell the worker to plot to THIS tab's plotter + # We pass 'self.plotter' instead of 'mw' + self._render3d_worker.plot_3d_points(self) + # Cache a reference to the main points/cloud actor for fast updates + try: + if "points" in self.plotter.actors: + self.points_actor = self.plotter.actors.get("points") + elif "cloud_volume" in self.plotter.actors: + self.points_actor = self.plotter.actors.get("cloud_volume") + except Exception: + self.points_actor = None + + # Cache true data intensity bounds and set LUT scalar ranges + try: + self._data_intensity_min = float(np.min(intensities)) + self._data_intensity_max = float(np.max(intensities)) + if self.lut is not None: + self.lut.scalar_range = (self._data_intensity_min, self._data_intensity_max) + if self.lut2 is not None: + self.lut2.scalar_range = (self._data_intensity_min, self._data_intensity_max) + except Exception: + pass + # Apply LUTs to actors + try: + if self.points_actor is not None and self.lut is not None: + self.points_actor.mapper.lookup_table = self.lut + except Exception: + pass + try: + if self.slab_actor is not None and (self.lut2 or self.lut) is not None: + self.slab_actor.mapper.lookup_table = (self.lut2 or self.lut) + except Exception: + pass + # Show bounds like in hkl_3d + try: + self.plotter.show_bounds( + mesh=self.points_actor.mapper.input if self.points_actor is not None else None, + xtitle='H Axis', ytitle='K Axis', ztitle='L Axis', + ticks='inside', minor_ticks=True, + n_xlabels=7, n_ylabels=7, n_zlabels=7, + x_color='red', y_color='green', z_color='blue', + font_size=20 + ) + except Exception: + pass + # Sync scalar bars to primary LUT + try: + if hasattr(self.plotter, 'scalar_bars') and self.lut is not None: + for _, sb in self.plotter.scalar_bars.items(): + try: + sb.SetLookupTable(self.lut) + sb.Modified() + except Exception: + pass + except Exception: + pass + # Ensure visibility respects checkboxes + try: + self.toggle_3d_points(self.cb_show_points.isChecked()) + self.toggle_3d_slice(self.cb_show_slice.isChecked()) + except Exception: + pass + + # Align current intensity range with data bounds and reflect in UI + try: + self.update_intensity() + except Exception: + pass + + # Switch to this tab automatically + if hasattr(mw, 'tabWidget_analysis'): + idx = mw.tabWidget_analysis.indexOf(self) + mw.tabWidget_analysis.setCurrentIndex(idx) + + self.main_window.update_status("3D Rendering Complete") + except Exception as e: + print(f"Render Error: {e}") + + # 4. 
Threaded Execution + self._render_thread = QThread(self) + self._render3d_worker = Render3D( + points=points, + intensities=intensities, + num_images=num_images, + shape=shape + ) + + self._render3d_worker.moveToThread(self._render_thread) + + # Connect signals + self._render_thread.started.connect(self._render3d_worker.run) + self._render3d_worker.render_ready.connect(_on_ready) # Use the local plotter + + # Cleanup + self._render3d_worker.finished.connect(self._render_thread.quit) + self._render3d_worker.finished.connect(self._render3d_worker.deleteLater) + self._render_thread.finished.connect(self._render_thread.deleteLater) + + self._render_thread.start() + + except Exception as e: + QMessageBox.critical(self, "3D Viewer Error", f"Error: {str(e)}") + finally: + elapsed = int((_time.perf_counter() - start_all) * 1000) + self.update_info(elapsed) + + def on_plane_update(self, normal, origin): + """Extracts points near the plane to simulate a 3D slice.""" + if self.cloud_mesh_3d is None: + return + + # Plane math: (Point - Origin) ⋅ Normal + vec = self.cloud_mesh_3d.points - origin + dist = np.dot(vec, normal) + + # Thickness of the slice in HKL units (align with HKL3D) + thickness = 0.002 + mask = np.abs(dist) < thickness + + slab = self.cloud_mesh_3d.extract_points(mask) + + if slab.n_points > 0: + self.slab_actor = self.plotter.add_mesh( + slab, + name="slab_points", + render_points_as_spheres=True, + point_size=8, + scalars='intensity', + cmap=(self.lut2 or self.lut), + show_scalar_bar=False + ) + # Ensure the new slab respects the current checkbox state + self.slab_actor.SetVisibility(self.cb_show_slice.isChecked()) + + # Match current intensity + clim = [self.sb_min_intensity_3d.value(), self.sb_max_intensity_3d.value()] + self.slab_actor.mapper.scalar_range = clim + + # Keep plane widget synchronized to final state + try: + widgets = getattr(self.plotter, 'plane_widgets', []) + if self.plane_widget is not None: + self.plane_widget.SetNormal(normal) + self.plane_widget.SetOrigin(origin) + elif widgets: + widgets[0].SetNormal(normal) + widgets[0].SetOrigin(origin) + except Exception: + pass + + self.plotter.render() + # Respect slice toggle state after update + try: + self.toggle_3d_slice(self.cb_show_slice.isChecked()) + except Exception: + pass + + # Update the 3D Info dock with HKL slice information + try: + info_dock = getattr(self.main_window, 'info_3d_dock', None) + if info_dock is not None: + shape = None + try: + shape = tuple(getattr(self, 'curr_shape', None) or ()) + if not (isinstance(shape, tuple) and len(shape) == 2): + shape = (0, 0) + except Exception: + shape = (0, 0) + info_dock.update_from_slice( + slab, + np.asarray(normal, dtype=float), + np.asarray(origin, dtype=float), + target_shape=shape + ) + except Exception: + pass + + def update_intensity(self): + """Updates the min/max intensity levels and scalar bar range""" + if not self.plotter: + return + + # Read requested values from UI + try: + requested_min = float(self.sb_min_intensity_3d.value()) + requested_max = float(self.sb_max_intensity_3d.value()) + except Exception: + # Fallback to current mapper range if spinboxes unavailable + requested_min, requested_max = 0.0, 1.0 + + # Clamp to true data range if available + data_min = getattr(self, '_data_intensity_min', None) + data_max = getattr(self, '_data_intensity_max', None) + vmin = requested_min + vmax = requested_max + if data_min is not None and data_max is not None: + vmin = max(requested_min, data_min) + vmax = min(requested_max, data_max) + + # Enforce 
ordering and non-zero span + if vmin > vmax: + vmin, vmax = vmax, vmin + if vmin == vmax: + vmax = vmin + 1e-6 + + # Reflect applied values back to the UI + try: + self.sb_min_intensity_3d.setValue(vmin) + self.sb_max_intensity_3d.setValue(vmax) + except Exception: + pass + + # Define the new scalar range + new_range = [vmin, vmax] + + # Update main cloud/points actor scalar range + try: + actors = getattr(self.plotter, 'actors', {}) or {} + if "points" in actors: + actors["points"].mapper.scalar_range = (new_range[0], new_range[1]) + if "cloud_volume" in actors: + actors["cloud_volume"].mapper.scalar_range = (new_range[0], new_range[1]) + except Exception: + pass + + if "slab_points" in self.plotter.actors: + self.plotter.actors["slab_points"].mapper.scalar_range = (new_range[0], new_range[1]) + + # Update the volume actor by re-adding with new clim range + if hasattr(self.plotter, 'scalar_bars'): + for bar in self.plotter.scalar_bars.values(): + try: + bar.GetLookupTable().SetTableRange(new_range[0], new_range[1]) + except Exception: + pass + + # Force update of all scalar bars with the new range + if hasattr(self.plotter, 'scalar_bars'): + for name, scalar_bar in self.plotter.scalar_bars.items(): + if scalar_bar: + try: + scalar_bar.GetLookupTable().SetTableRange(new_range[0], new_range[1]) + scalar_bar.Modified() + except Exception: + pass + + # Update slice actor scalar range if it exists + if "slice" in self.plotter.actors: + slice_actor = self.plotter.actors["slice"] + if hasattr(slice_actor, 'mapper'): + try: + slice_actor.mapper.scalar_range = (new_range[0], new_range[1]) + except Exception: + pass + + # Force a re-render to apply the changes + self.plotter.render() + # Respect checkbox states after intensity update + try: + self.toggle_3d_points(self.cb_show_points.isChecked()) + self.toggle_3d_slice(self.cb_show_slice.isChecked()) + except Exception: + pass + + # Update Info labels and availability after intensity changes (best-effort) + try: + self.update_info_slice_labels() + self._refresh_availability() + except Exception: + pass + + # === Visibility & Colormap === + + + def reset_slice(self): + """Reset slice to HK (xy) preset at the data center.""" + try: + # Determine a reasonable center + origin = None + try: + if self.cloud_mesh_3d is not None and hasattr(self.cloud_mesh_3d, 'center'): + origin = np.array(self.cloud_mesh_3d.center, dtype=float) + elif self.mesh is not None and hasattr(self.mesh, 'center'): + origin = np.array(self.mesh.center, dtype=float) + except Exception: + origin = None + if origin is None: + origin = np.array([0.0, 0.0, 0.0], dtype=float) + # Normal along L for HK plane + normal = np.array([0.0, 0.0, 1.0], dtype=float) + self.set_plane_state(normal, origin) + except Exception as e: + try: + self.main_window.update_status(f"Error resetting 3D slice: {e}") + except Exception: + pass + + def _remove_plane_widget(self): + """Safely remove existing plane widget (if any).""" + try: + # Use the same attribute name that is set by the Render3D worker + if self.plane_widget is not None: + try: + self.plane_widget.EnabledOff() + except Exception: + pass + + try: + self.plotter.clear_plane_widgets() + except Exception: + pass + + self.plane_widget = None + except Exception: + pass + + def toggle_pointer(self, checked: bool): + """Enable/Disable the interactive plane widget and show/hide slab points.""" + try: + # Plane widget visibility + if self.plane_widget is not None: + try: + if checked: + self.plane_widget.On() + else: + self.plane_widget.Off() + except 
Exception: + pass + else: + # Fallback: use plotter.plane_widgets list if available + widgets = getattr(self.plotter, "plane_widgets", []) + for pw in widgets or []: + try: + if checked: + pw.EnabledOn() + else: + pw.EnabledOff() + except Exception: + pass + # Slab points actor visibility + if "slab_points" in self.plotter.actors: + try: + self.plotter.actors["slab_points"].SetVisibility(bool(checked)) + except Exception: + try: + self.plotter.renderer._actors["slab_points"].SetVisibility(bool(checked)) + except Exception: + pass + self.plotter.render() + except Exception: + pass + + # ===== Info/Availability (align with hkl_3d patterns) ===== + + def _refresh_availability(self): + """Enable/disable controls depending on plotter/data availability.""" + try: + has_data = bool(self.cloud_mesh_3d is not None or getattr(self, 'points_actor', None) is not None) + for w in [getattr(self, "cb_show_points", None), + getattr(self, "cb_show_slice", None), + getattr(self, "sb_min_intensity_3d", None), + getattr(self, "sb_max_intensity_3d", None)]: + try: + if w is not None: + w.setEnabled(has_data) + except Exception: + pass + except Exception: + pass + + # ===== Slice Plane helpers ===== + def get_plane_state(self): + """Return (normal, origin) for current plane; defaults to Z-axis and mesh center.""" + try: + if self.plane_widget is not None: + try: + normal = np.array(self.plane_widget.GetNormal(), dtype=float) + origin = np.array(self.plane_widget.GetOrigin(), dtype=float) + return normal, origin + except Exception: + pass + # Fallback to first plane widget if present + widgets = getattr(self.plotter, 'plane_widgets', []) + if widgets: + pw = widgets[0] + try: + normal = np.array(pw.GetNormal(), dtype=float) + origin = np.array(pw.GetOrigin(), dtype=float) + return normal, origin + except Exception: + pass + except Exception: + pass + # Defaults + normal = np.array([0.0, 0.0, 1.0], dtype=float) + try: + if self.cloud_mesh_3d is not None and hasattr(self.cloud_mesh_3d, 'center'): + origin = np.array(self.cloud_mesh_3d.center, dtype=float) + elif self.mesh is not None and hasattr(self.mesh, 'center'): + origin = np.array(self.mesh.center, dtype=float) + else: + origin = np.array([0.0, 0.0, 0.0], dtype=float) + except Exception: + origin = np.array([0.0, 0.0, 0.0], dtype=float) + return normal, origin + + def set_plane_state(self, normal, origin): + """Programmatically set plane state and trigger slice update.""" + try: + n = self.normalize_vector(np.array(normal, dtype=float)) + o = np.array(origin, dtype=float) + # Update widget if available + if self.plane_widget is not None: + try: + self.plane_widget.SetNormal(n) + self.plane_widget.SetOrigin(o) + except Exception: + pass + else: + widgets = getattr(self.plotter, 'plane_widgets', []) + if widgets: + try: + widgets[0].SetNormal(n) + widgets[0].SetOrigin(o) + except Exception: + pass + # Refresh slice + try: + self.on_plane_update(n, o) + except Exception: + pass + except Exception: + pass + + @staticmethod + def normalize_vector(v): + try: + v = np.array(v, dtype=float) + norm = float(np.linalg.norm(v)) + if norm <= 0.0: + return np.array([0.0, 0.0, 1.0], dtype=float) + return v / norm + except Exception: + return np.array([0.0, 0.0, 1.0], dtype=float) + + def set_custom_normal(self, n): + try: + self._custom_normal = np.array(n, dtype=float) + except Exception: + self._custom_normal = np.array([0.0, 0.0, 1.0], dtype=float) + + def set_plane_preset(self, preset_text: str): + """Set plane normal to preset HK/KL/HL or custom vector.""" + try: + 
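# Preset names map onto reciprocal-space axes: the HK plane is viewed along L (normal +Z), KL along H (normal +X), and HL along K (normal +Y); anything else falls back to the stored custom normal. +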
preset = (preset_text or '').lower() + except Exception: + preset = '' + if ('xy' in preset) or ('hk' in preset): + n = np.array([0.0, 0.0, 1.0], dtype=float) + elif ('yz' in preset) or ('kl' in preset): + n = np.array([1.0, 0.0, 0.0], dtype=float) + elif ('xz' in preset) or ('hl' in preset): + n = np.array([0.0, 1.0, 0.0], dtype=float) + else: + # Custom + n = self.normalize_vector(getattr(self, '_custom_normal', np.array([0.0, 0.0, 1.0], dtype=float))) + _, origin = self.get_plane_state() + self.set_plane_state(n, origin) + + # ===== Translation ===== + def nudge_along_normal(self, sign: int): + try: + normal, origin = self.get_plane_state() + step = float(getattr(self, '_slice_translate_step', 0.01)) + origin_new = origin + float(sign) * step * normal + self.set_plane_state(normal, origin_new) + except Exception: + pass + + def nudge_along_axis(self, axis: str, sign: int): + try: + axis = (axis or 'H').upper() + if axis == 'H': + d = np.array([1.0, 0.0, 0.0], dtype=float) + elif axis == 'K': + d = np.array([0.0, 1.0, 0.0], dtype=float) + else: + d = np.array([0.0, 0.0, 1.0], dtype=float) + normal, origin = self.get_plane_state() + step = float(getattr(self, '_slice_translate_step', 0.01)) + origin_new = origin + float(sign) * step * d + self.set_plane_state(normal, origin_new) + except Exception: + pass + + # ===== Rotation ===== + def rotate_about_axis(self, axis: str, deg: float): + try: + axis = (axis or 'H').upper() + if axis == 'H': + u = np.array([1.0, 0.0, 0.0], dtype=float) + elif axis == 'K': + u = np.array([0.0, 1.0, 0.0], dtype=float) + else: + u = np.array([0.0, 0.0, 1.0], dtype=float) + normal, origin = self.get_plane_state() + theta = float(np.deg2rad(deg)) + ux, uy, uz = u + c, s = np.cos(theta), np.sin(theta) + R = np.array([ + [c+ux*ux*(1-c), ux*uy*(1-c)-uz*s, ux*uz*(1-c)+uy*s], + [uy*ux*(1-c)+uz*s, c+uy*uy*(1-c), uy*uz*(1-c)-ux*s], + [uz*ux*(1-c)-uy*s, uz*uy*(1-c)+ux*s, c+uz*uz*(1-c)] + ], dtype=float) + new_normal = R @ normal + new_normal = self.normalize_vector(new_normal) + self.set_plane_state(new_normal, origin) + except Exception: + pass + + # ===== Camera ===== + def zoom_in(self): + try: + step = float(getattr(self, '_zoom_step', 1.5)) + if step <= 1.0: + step = 1.5 + self.plotter.camera.zoom(step) + self.plotter.render() + except Exception: + pass + + def zoom_out(self): + try: + step = float(getattr(self, '_zoom_step', 1.5)) + if step <= 1.0: + step = 1.5 + self.plotter.camera.zoom(1.0 / step) + self.plotter.render() + except Exception: + pass + + def reset_camera(self): + try: + self.plotter.reset_camera() + self.plotter.render() + except Exception: + pass + + def set_camera_position(self, preset: str): + try: + txt = (preset or '').strip().lower() + p = self.plotter + cam = getattr(p, 'camera', None) + # center focus + try: + if self.cloud_mesh_3d is not None and hasattr(self.cloud_mesh_3d, 'center'): + p.set_focus(self.cloud_mesh_3d.center) + except Exception: + pass + if txt in ('hk', 'xy'): + p.view_xy() + elif txt in ('kl', 'yz'): + p.view_yz() + elif txt in ('hl', 'xz'): + p.view_xz() + elif 'iso' in txt: + try: + p.view_isometric() + except Exception: + try: + p.view_vector((1.0, 1.0, 1.0)) + if cam is not None: + cam.view_up = (0.0, 0.0, 1.0) + except Exception: + pass + else: + # Axis-aligned + if 'h+' in txt: + p.view_vector((1.0, 0.0, 0.0)) + if cam is not None: + cam.view_up = (0.0, 0.0, 1.0) + elif 'h-' in txt: + p.view_vector((-1.0, 0.0, 0.0)) + if cam is not None: + cam.view_up = (0.0, 0.0, 1.0) + elif 'k+' in txt: + p.view_vector((0.0, 1.0, 
0.0)) + if cam is not None: + cam.view_up = (0.0, 0.0, 1.0) + elif 'k-' in txt: + p.view_vector((0.0, -1.0, 0.0)) + if cam is not None: + cam.view_up = (0.0, 0.0, 1.0) + elif 'l+' in txt: + p.view_vector((0.0, 0.0, 1.0)) + if cam is not None: + cam.view_up = (0.0, 1.0, 0.0) + elif 'l-' in txt: + p.view_vector((0.0, 0.0, -1.0)) + if cam is not None: + cam.view_up = (0.0, 1.0, 0.0) + try: + if cam is not None and hasattr(cam, 'orthogonalize_view_up'): + cam.orthogonalize_view_up() + except Exception: + pass + try: + p.render() + except Exception: + pass + except Exception: + pass + + def view_slice_normal(self): + try: + normal, origin = self.get_plane_state() + normal = self.normalize_vector(normal) + origin = np.array(origin, dtype=float) + cam = getattr(self.plotter, 'camera', None) + if cam is None: + return + # distance heuristic + try: + rng = None + if self.cloud_mesh_3d is not None and hasattr(self.cloud_mesh_3d, 'points'): + rng = self.cloud_mesh_3d.points.max(axis=0) - self.cloud_mesh_3d.points.min(axis=0) + d = float(np.linalg.norm(rng)) * 0.5 if rng is not None else 1.0 + except Exception: + d = 1.0 + try: + cam.focal_point = origin.tolist() + except Exception: + pass + try: + cam.position = (origin + normal * d).tolist() + except Exception: + pass + # adjust view up if parallel + try: + up = np.array(getattr(cam, 'view_up', [0.0, 1.0, 0.0]), dtype=float) + upn = self.normalize_vector(up) + if abs(float(np.dot(upn, normal))) > 0.99: + new_up = np.array([0.0, 1.0, 0.0], dtype=float) if abs(normal[1]) < 0.99 else np.array([1.0, 0.0, 0.0], dtype=float) + cam.view_up = new_up.tolist() + except Exception: + pass + try: + self.plotter.render() + except Exception: + pass + except Exception: + pass diff --git a/viewer/workbench/workbench.py b/viewer/workbench/workbench.py new file mode 100644 index 0000000..0ab9a2d --- /dev/null +++ b/viewer/workbench/workbench.py @@ -0,0 +1,3588 @@ +#!/usr/bin/env python3 +""" +Workbench Window +A PyQt-based application for analyzing HDF5 data with 2D visualization capabilities. +Inherits from BaseWindow for consistent functionality across the application. 
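+ + The window is assembled from dockable panels (data structure, 2D/3D info, slice-plane controls) plus analysis tabs; see the DOCKS and TABS sections in __init__ below.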
+""" + +import sys +import os +from pathlib import Path +from PyQt5.QtWidgets import QApplication, QMessageBox, QTreeWidgetItem, QFileDialog, QMenu, QAction, QVBoxLayout, QDockWidget, QListWidget, QListWidgetItem, QInputDialog, QTableWidget, QTableWidgetItem, QPushButton, QLabel, QWidget +from PyQt5.QtCore import QTimer, Qt, pyqtSlot, QThread, QObject, pyqtSignal +from PyQt5.QtGui import QBrush, QColor, QCursor +import h5py +import hdf5plugin # Import hdf5plugin for decompression support +import glob +import numpy as np +import time +import pyqtgraph as pg +from viewer.workbench.tabs.workspace_3d import Workspace3D + + +# Add the project root to the Python path +project_root = Path(__file__).resolve().parents[2] +sys.path.insert(0, str(project_root)) + +from viewer.base_window import BaseWindow +from utils.hdf5_loader import HDF5Loader + +# Dimension-specific controls +from viewer.controls.controls_1d import Controls1D +from viewer.controls.controls_2d import Controls2D +from viewer.workbench.managers.roi_manager import ROIManager +from viewer.workbench.dock_window import DockWindow +from viewer.workbench.docks.data_structure import DataStructureDock +from viewer.workbench.docks.info_2d_dock import Info2DDock +from viewer.workbench.docks.info_3d_dock import Info3DDock +from viewer.workbench.docks.slice_plane import SlicePlaneDock +#from viewer.workbench.docks.dash_ai import DashAI + + +class WorkbenchWindow(BaseWindow): + """ + Workbench window for data analysis. + Inherits from BaseWindow and adds specific functionality for HDF5 analysis. + """ + + # === Initialization & UI Setup === + def __init__(self): + """Initialize the Workbench window.""" + super().__init__(ui_file_name="workbench/workbench.ui", viewer_name="Workbench") + self.setup_window_properties("Workbench - Data Analysis", 1600, 1000) + + # ====== DOCKS START ====== # + # 2d + #self.dash_sam_dock = DashAI(main_window=self) + + # 3d + + # other + self.data_structure_dock = DataStructureDock(main_window=self, segment_name="other", dock_area=Qt.LeftDockWidgetArea) + # info dock (2D) + self.info_2d_dock = Info2DDock(main_window=self, title="2D Info", segment_name="2d", dock_area=Qt.RightDockWidgetArea) + # info dock (3D) + self.info_3d_dock = Info3DDock(main_window=self, title="3D Info", segment_name="3d", dock_area=Qt.RightDockWidgetArea) + # roi + + # Alias Workbench's tree to the dock's tree widget + self.tree_data = self.data_structure_dock.tree_data + # Hide any fixed left panel from the UI and give space to analysis + if hasattr(self, 'leftPanel') and self.leftPanel is not None: + self.leftPanel.hide() + if hasattr(self, 'mainSplitter') and self.mainSplitter is not None: + self.mainSplitter.setSizes([0, self.width()]) + # ======= DOCKS END ======== # + + + # ======= TABS START ======= # + self.tab_1d = None + self.tab_2d = None + self.tab_3d = Workspace3D(parent=self, main_window=self) + # Slice Controls dock (left, under Data Structure) + try: + self.slice_plane_dock = SlicePlaneDock(main_window=self, segment_name="3d", dock_area=Qt.LeftDockWidgetArea) + # Position below Data Structure dock + try: + self.splitDockWidget(self.data_structure_dock, self.slice_plane_dock, Qt.Vertical) + except Exception: + pass + except Exception: + pass + # ======= TABS END ========= # + + + # ===== CONTROLS START ===== # + self.controls_1d = Controls1D(self) + self.controls_2d = Controls2D(self) + # ======== CONTROLS END ======= # + + # ROI manager to centralize ROI logic + self.roi_manager = ROIManager(self) + # Track secondary dock windows 
(modeless) + self._dock_windows = [] + + self.setup_2d_workspace() + self.setup_1d_workspace() + self.setup_workbench_connections() + + # Use shared HDF5 loader utility + self.h5loader = HDF5Loader() + + # == FILE PATH INFO START ====== # + self.current_file_path = None + self.selected_dataset_path = None + # ==== FILE PATH INFO END ====== # + + + # ROI state + self.rois = [] + self.current_roi = None + # ROI dock mappings + self.roi_by_item = {} + self.item_by_roi_id = {} + self.roi_names = {} + self.stats_row_by_roi_id = {} + self.roi_plot_docks_by_roi_id = {} + # Setup dock to track ROIs and stats + try: + self.roi_manager.setup_docks() + except Exception: + pass + # Initialize 2D axis variables + try: + self.axis_2d_x = "Columns" + self.axis_2d_y = "Row" + except Exception: + pass + + def setup_roi_dock(self): + try: + self.roi_dock = QDockWidget("ROIs", self) + self.roi_dock.setAllowedAreas(Qt.RightDockWidgetArea) + self.roi_list = QListWidget() + try: + self.roi_list.itemClicked.connect(self.on_roi_list_item_clicked) + self.roi_list.itemDoubleClicked.connect(self.on_roi_list_item_double_clicked) + # Enable right-click context menu on ROI list + self.roi_list.setContextMenuPolicy(Qt.CustomContextMenu) + self.roi_list.customContextMenuRequested.connect(self.show_roi_list_context_menu) + except Exception: + pass + self.roi_dock.setWidget(self.roi_list) + self.addDockWidget(Qt.RightDockWidgetArea, self.roi_dock) + try: + self.roi_dock.visibilityChanged.connect(self.on_rois_dock_visibility_changed) + except Exception: + pass + except Exception as e: + self.update_status(f"Error setting up ROI dock: {e}") + + def format_roi_text(self, roi): + try: + pos = roi.pos(); size = roi.size() + x = int(pos.x()); y = int(pos.y()) + w = int(size.x()); h = int(size.y()) + name = self.get_roi_name(roi) + return f"{name}: x={x}, y={y}, w={w}, h={h}" + except Exception: + return "ROI" + + def add_roi_to_dock(self, roi): + try: + if not hasattr(self, 'roi_list') or self.roi_list is None: + return + text = self.format_roi_text(roi) + item = QListWidgetItem(text) + self.roi_list.addItem(item) + self.roi_by_item[item] = roi + self.item_by_roi_id[id(roi)] = item + except Exception as e: + self.update_status(f"Error adding ROI to dock: {e}") + + def update_roi_item(self, roi): + try: + item = self.item_by_roi_id.get(id(roi)) + if item is not None: + item.setText(self.format_roi_text(roi)) + except Exception: + pass + + def on_roi_list_item_clicked(self, item): + try: + roi = self.roi_by_item.get(item) + if roi: + self.set_active_roi(roi) + except Exception as e: + self.update_status(f"Error selecting ROI from dock: {e}") + + def on_roi_list_item_double_clicked(self, item): + try: + roi = self.roi_by_item.get(item) + if roi: + self.show_roi_stats_for_roi(roi) + except Exception as e: + self.update_status(f"Error showing ROI stats from dock: {e}") + + def show_roi_list_context_menu(self, position): + """Show context menu for ROI list items with an option to open a PyQtGraph view of the ROI.""" + try: + if not hasattr(self, 'roi_list') or self.roi_list is None: + return + item = self.roi_list.itemAt(position) + if item is None: + return + roi = self.roi_by_item.get(item) + if roi is None: + return + menu = QMenu(self) + action_plot = QAction("Open ROI Plot", self) + action_plot.triggered.connect(lambda: self.open_roi_plot_dock(roi)) + menu.addAction(action_plot) + # Also provide windowed ROI plot with ROI Math panel + action_plot_window = QAction("Open ROI Plot (Window)", self) + 
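# The windowed variant opens ROIPlotDialog, which bundles the ROI Math panel with the 1D plot. +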
action_plot_window.triggered.connect(lambda: self.open_roi_plot(roi)) + menu.addAction(action_plot_window) + # Also provide ROI Math dock + action_math_dock = QAction("Open ROI Math Dock", self) + action_math_dock.triggered.connect(lambda: self.open_roi_math_dock(roi)) + menu.addAction(action_math_dock) + # Potential future actions can be added here + menu.exec_(self.roi_list.mapToGlobal(position)) + except Exception as e: + self.update_status(f"Error showing ROI context menu: {e}") + + def open_roi_plot(self, roi): + """Open a modeless window displaying a 1D plot of the selected ROI region.""" + try: + frame_data = self.get_current_frame_data() + if frame_data is None: + QMessageBox.information(self, "ROI Plot", "No image data available.") + return + # Compute ROI bounds + pos = roi.pos(); size = roi.size() + x0 = max(0, int(pos.x())); y0 = max(0, int(pos.y())) + w = max(1, int(size.x())); h = max(1, int(size.y())) + height, width = frame_data.shape + x1 = min(width, x0 + w); y1 = min(height, y0 + h) + if x0 >= x1 or y0 >= y1: + QMessageBox.information(self, "ROI Plot", "ROI area is empty or out of bounds.") + return + sub = frame_data[y0:y1, x0:x1] + # Create and show the 1D plot dialog (modeless) + try: + from viewer.workbench.roi_plot_dialog import ROIPlotDialog + except Exception: + ROIPlotDialog = None + if ROIPlotDialog is None: + QMessageBox.warning(self, "ROI Plot", "ROIPlotDialog not available.") + return + # Keep a reference to avoid GC + if not hasattr(self, '_roi_plot_dialogs'): + self._roi_plot_dialogs = [] + dlg = ROIPlotDialog(self, sub) + dlg.setWindowTitle(f"ROI: {self.get_roi_name(roi)}") + dlg.resize(600, 500) + # Wire ROI & frame changes to update dialog data + def _update_dialog_data(): + try: + frame = self.get_current_frame_data() + except Exception: + frame = None + if frame is None: + return + sub_img = None + try: + image_item = getattr(self.image_view, 'imageItem', None) if hasattr(self, 'image_view') else None + if image_item is not None: + sub_img = roi.getArrayRegion(frame, image_item) + if sub_img is not None and hasattr(sub_img, 'ndim') and sub_img.ndim > 2: + sub_img = np.squeeze(sub_img) + except Exception: + sub_img = None + if sub_img is None or int(getattr(sub_img, 'size', 0)) == 0: + # Fallback to axis-aligned bbox + pos = roi.pos(); size = roi.size() + x0 = max(0, int(pos.x())); y0 = max(0, int(pos.y())) + w = max(1, int(size.x())); h = max(1, int(size.y())) + hgt, wid = frame.shape + x1 = min(wid, x0 + w); y1 = min(hgt, y0 + h) + if x0 < x1 and y0 < y1: + sub_img = frame[y0:y1, x0:x1] + if sub_img is not None and int(getattr(sub_img, 'size', 0)) > 0: + try: + dlg.update_roi_data(sub_img) + except Exception: + pass + try: + if hasattr(roi, 'sigRegionChanged'): + roi.sigRegionChanged.connect(_update_dialog_data) + if hasattr(roi, 'sigRegionChangeFinished'): + roi.sigRegionChangeFinished.connect(_update_dialog_data) + except Exception: + pass + try: + if hasattr(self, 'frame_spinbox'): + self.frame_spinbox.valueChanged.connect(lambda _: _update_dialog_data()) + except Exception: + pass + dlg.show() + # Track alive dialogs + self._roi_plot_dialogs.append(dlg) + except Exception as e: + self.update_status(f"Error opening ROI plot: {e}") + + def open_roi_math_dock(self, roi): + """Open a dockable ROI Math window on the right dock area.""" + try: + # Ensure ROI exists + if roi is None: + QMessageBox.information(self, "ROI Math", "No ROI selected.") + return + # Import the ROIMathDock + try: + from viewer.workbench.roi_math_dock import ROIMathDock + except 
Exception: + ROIMathDock = None + if ROIMathDock is None: + QMessageBox.warning(self, "ROI Math", "ROIMathDock not available.") + return + # Create and add the dock widget + dock_title = f"ROI Math: {self.get_roi_name(roi)}" + dock = ROIMathDock(self, dock_title, self, roi) + self.addDockWidget(Qt.RightDockWidgetArea, dock) + # Register toggle under Windows->2d submenu + try: + self.add_dock_toggle_action(dock, dock_title, segment_name="2d") + except Exception: + pass + dock.show() + # Track alive docks + if not hasattr(self, '_roi_math_dock_widgets'): + self._roi_math_dock_widgets = [] + self._roi_math_dock_widgets.append(dock) + try: + if not hasattr(self, 'roi_math_docks_by_roi_id') or self.roi_math_docks_by_roi_id is None: + self.roi_math_docks_by_roi_id = {} + self.roi_math_docks_by_roi_id.setdefault(id(roi), []).append(dock) + except Exception: + pass + except Exception as e: + self.update_status(f"Error opening ROI Math dock: {e}") + + def open_roi_plot_dock(self, roi): + """Open a dockable 1D plot of the selected ROI region on the right dock area.""" + try: + frame_data = self.get_current_frame_data() + if frame_data is None: + QMessageBox.information(self, "ROI Plot", "No image data available.") + return + # Compute ROI bounds + pos = roi.pos(); size = roi.size() + x0 = max(0, int(pos.x())); y0 = max(0, int(pos.y())) + w = max(1, int(size.x())); h = max(1, int(size.y())) + height, width = frame_data.shape + x1 = min(width, x0 + w); y1 = min(height, y0 + h) + if x0 >= x1 or y0 >= y1: + QMessageBox.information(self, "ROI Plot", "ROI area is empty or out of bounds.") + return + sub = frame_data[y0:y1, x0:x1] + # Create and add the dock widget + try: + from viewer.workbench.roi_plot_dock import ROIPlotDock + except Exception: + ROIPlotDock = None + if ROIPlotDock is None: + QMessageBox.warning(self, "ROI Plot", "ROIPlotDock not available.") + return + dock_title = f"ROI: {self.get_roi_name(roi)}" + dock = ROIPlotDock(self, dock_title, self, roi) + self.addDockWidget(Qt.RightDockWidgetArea, dock) + # Register toggle under Windows->2d submenu + try: + self.add_dock_toggle_action(dock, dock_title, segment_name="2d") + except Exception: + pass + dock.show() + # Track alive docks + if not hasattr(self, '_roi_plot_dock_widgets'): + self._roi_plot_dock_widgets = [] + self._roi_plot_dock_widgets.append(dock) + try: + if not hasattr(self, 'roi_plot_docks_by_roi_id') or self.roi_plot_docks_by_roi_id is None: + self.roi_plot_docks_by_roi_id = {} + self.roi_plot_docks_by_roi_id.setdefault(id(roi), []).append(dock) + except Exception: + pass + except Exception as e: + self.update_status(f"Error opening ROI plot dock: {e}") + + def create_dock_window_and_show(self): + """Create a new modeless empty window to host dockables later.""" + try: + win = DockWindow(self, title="Dock Window", width=1000, height=700) + # Keep reference to prevent garbage collection while open + self._dock_windows.append(win) + try: + win.destroyed.connect(lambda _: self._dock_windows.remove(win) if win in self._dock_windows else None) + except Exception: + pass + win.show() + # Do not disable main window; ensure modeless behavior + try: + win.raise_() + win.activateWindow() + except Exception: + pass + except Exception as e: + self.update_status(f"Error creating Dock Window: {e}") + + def setup_roi_stats_dock(self): + try: + self.roi_stats_dock = QDockWidget("ROI", self) + self.roi_stats_dock.setAllowedAreas(Qt.RightDockWidgetArea) + self.roi_stats_table = QTableWidget(0, 11, self.roi_stats_dock) + 
self.roi_stats_table.setHorizontalHeaderLabels(["Name","sum","min","max","mean","std","count","x","y","w","h"]) + self.roi_stats_dock.setWidget(self.roi_stats_table) + self.addDockWidget(Qt.RightDockWidgetArea, self.roi_stats_dock) + try: + self.roi_stats_dock.visibilityChanged.connect(self.on_roi_stats_dock_visibility_changed) + except Exception: + pass + except Exception as e: + self.update_status(f"Error setting up ROI stats dock: {e}") + + def get_roi_name(self, roi): + try: + # Prefer ROIManager's naming to keep everything in sync (including renames) + if hasattr(self, 'roi_manager') and self.roi_manager is not None: + try: + return self.roi_manager.get_roi_name(roi) + except Exception: + pass + # Fallback to local mapping + name = self.roi_names.get(id(roi)) + if name: + return name + idx = 1 + if hasattr(self, 'rois') and roi in self.rois: + idx = self.rois.index(roi) + 1 + name = f"ROI {idx}" + self.roi_names[id(roi)] = name + return name + except Exception: + return "ROI" + + def rename_roi(self, roi): + """Delegate to ROIManager.""" + try: + self.roi_manager.rename_roi(roi) + except Exception as e: + self.update_status(f"Error renaming ROI: {e}") + + def update_roi_plot_dock_title(self, roi): + try: + name = self.get_roi_name(roi) + title = f"ROI: {name}" + try: + docks = self.roi_plot_docks_by_roi_id.get(id(roi), []) if hasattr(self, 'roi_plot_docks_by_roi_id') else [] + except Exception: + docks = [] + for dock in list(docks): + try: + dock.setWindowTitle(title) + except Exception: + pass + except Exception: + pass + + def ensure_stats_row_for_roi(self, roi): + try: + if id(roi) in self.stats_row_by_roi_id: + return self.stats_row_by_roi_id[id(roi)] + if not hasattr(self, 'roi_stats_table') or self.roi_stats_table is None: + return None + row = self.roi_stats_table.rowCount() + self.roi_stats_table.insertRow(row) + self.stats_row_by_roi_id[id(roi)] = row + # set name cell + name = self.get_roi_name(roi) + self.roi_stats_table.setItem(row, 0, QTableWidgetItem(name)) + return row + except Exception: + return None + + def update_stats_table_for_roi(self, roi, stats): + try: + row = self.ensure_stats_row_for_roi(roi) + if row is None: + return + # name cell keep in sync + self.roi_stats_table.setItem(row, 0, QTableWidgetItem(self.get_roi_name(roi))) + # fill numeric cells with xywh at the end + self.roi_stats_table.setItem(row, 1, QTableWidgetItem(f"{stats['sum']:.3f}")) + self.roi_stats_table.setItem(row, 2, QTableWidgetItem(f"{stats['min']:.3f}")) + self.roi_stats_table.setItem(row, 3, QTableWidgetItem(f"{stats['max']:.3f}")) + self.roi_stats_table.setItem(row, 4, QTableWidgetItem(f"{stats['mean']:.3f}")) + self.roi_stats_table.setItem(row, 5, QTableWidgetItem(f"{stats['std']:.3f}")) + self.roi_stats_table.setItem(row, 6, QTableWidgetItem(str(stats['count']))) + self.roi_stats_table.setItem(row, 7, QTableWidgetItem(str(stats['x']))) + self.roi_stats_table.setItem(row, 8, QTableWidgetItem(str(stats['y']))) + self.roi_stats_table.setItem(row, 9, QTableWidgetItem(str(stats['w']))) + self.roi_stats_table.setItem(row, 10, QTableWidgetItem(str(stats['h']))) + except Exception: + pass + + def on_rois_dock_visibility_changed(self, visible): + try: + if hasattr(self, 'action_show_rois_dock'): + self.action_show_rois_dock.setChecked(bool(visible)) + except Exception: + pass + + def on_roi_stats_dock_visibility_changed(self, visible): + try: + if hasattr(self, 'action_show_roi_stats_dock'): + self.action_show_roi_stats_dock.setChecked(bool(visible)) + except Exception: + pass + + def 
setup_workbench_connections(self): + """Set up connections specific to the workbench.""" + # Tree widget connections + if hasattr(self, 'tree_data'): + self.tree_data.itemClicked.connect(self.on_tree_item_clicked) + self.tree_data.itemDoubleClicked.connect(self.on_tree_item_double_clicked) + self.tree_data.setContextMenuPolicy(Qt.CustomContextMenu) + self.tree_data.customContextMenuRequested.connect(self.show_context_menu) + + # View menu actions + if hasattr(self, 'actionCollapseAll'): + self.actionCollapseAll.triggered.connect(self.collapse_all) + if hasattr(self, 'actionExpandAll'): + self.actionExpandAll.triggered.connect(self.expand_all) + + # Windows menu: add toggles to show/hide docks, with room for future items + # try: + # windows_menu = None + # if hasattr(self, 'menuBar') and self.menuBar is not None: + # try: + # windows_menu = self.menuBar.addMenu("Windows") + # except Exception: + # windows_menu = QMenu("Windows", self) + # try: + # self.menuBar().addMenu(windows_menu) + # except Exception: + # pass + # else: + # windows_menu = QMenu("Windows", self) + # try: + # self.menuBar().addMenu(windows_menu) + # except Exception: + # pass + + # # ROI dock toggle (renamed from 'ROI Stats' to 'ROI') + # self.action_show_roi_stats_dock = QAction("ROI", self) + # self.action_show_roi_stats_dock.setCheckable(True) + # self.action_show_roi_stats_dock.setChecked(True if hasattr(self, 'roi_stats_dock') and self.roi_stats_dock.isVisible() else True) + # self.action_show_roi_stats_dock.toggled.connect(lambda checked: hasattr(self, 'roi_stats_dock') and self.roi_stats_dock.setVisible(checked)) + # windows_menu.addAction(self.action_show_roi_stats_dock) + + # # Open ROI Math dock for the active ROI + # self.action_open_roi_math_dock = QAction("ROI Math (Active ROI)", self) + # self.action_open_roi_math_dock.setToolTip("Open ROI Math dock for the currently active ROI") + # self.action_open_roi_math_dock.triggered.connect(lambda: hasattr(self, 'current_roi') and self.open_roi_math_dock(self.current_roi)) + # windows_menu.addAction(self.action_open_roi_math_dock) + + # # Add Window: open an empty, modeless window for dockables + # self.action_add_window = QAction("Add Window", self) + # self.action_add_window.setToolTip("Open a new empty window for dockable tools") + # self.action_add_window.triggered.connect(self.create_dock_window_and_show) + # windows_menu.addAction(self.action_add_window) + # except Exception: + # pass + + # Set up default splitter sizes + self.setup_default_splitter_sizes() + + # Initialize file info text box + self.initialize_file_info_display() + + def setup_default_splitter_sizes(self): + """Set default splitter sizes for the horizontal splitter.""" + if hasattr(self, 'mainSplitter'): + # Calculate 15% of window width for data structure panel + window_width = self.width() + data_panel_width = int(window_width * 0.15) + analysis_panel_width = window_width - data_panel_width + + # Set the horizontal splitter sizes + self.mainSplitter.setSizes([data_panel_width, analysis_panel_width]) + + def initialize_file_info_display(self): + """Initialize the file information display.""" + if hasattr(self, 'file_info_text'): + self.update_file_info_display("No file loaded", {}) + + def setup_2d_workspace(self): + """Set up the 2D workspace with PyQtGraph plotitem functionality.""" + try: + # Setup the 2D plot viewer with PyQtGraph + self.setup_2d_plot_viewer() + + # Setup 2D controls connections (delegated) + self.controls_2d.setup() + + except Exception as e: + self.update_status(f"Error 
setting up 2D workspace: {e}") + # Fallback to keeping the placeholder if setup fails + + def setup_2d_plot_viewer(self): + """Set up the 2D plot viewer with PyQtGraph PlotItem and ImageView.""" + try: + # Create the plot item and image view similar to HKL slice 2D viewer + self.plot_item = pg.PlotItem() + self.image_view = pg.ImageView(view=self.plot_item) + + # Set axis labels + self.plot_item.setLabel('bottom', 'Columns [pixels]') + self.plot_item.setLabel('left', 'Row [pixels]') + + # Lock aspect ratio for square pixels + try: + self.image_view.view.setAspectLocked(True) + except Exception: + pass + + # Add the image view directly to the plot host + if hasattr(self, 'layoutPlotHost'): + self.layoutPlotHost.addWidget(self.image_view) + else: + print("Warning: layoutPlotHost not found, 2D plot may not display correctly") + + + # Initialize with empty data + self.clear_2d_plot() + + # Setup hover overlays and mouse tracking + self._setup_2d_hover() + + # Set default hover enabled and preserve default context menu + try: + self._hover_enabled = True + if hasattr(self, 'image_view') and self.image_view is not None: + # Restore default context menu (do not override with custom) + self.image_view.setContextMenuPolicy(Qt.DefaultContextMenu) + except Exception: + pass + + except Exception as e: + self.update_status(f"Error setting up 2D plot viewer: {e}") + + # === Controls: 2D === + def setup_controls_2d(self): + """Set up connections for the 2D viewer controls.""" + try: + # Connect colormap selection + if hasattr(self, 'cbColorMapSelect_2d'): + self.cbColorMapSelect_2d.currentTextChanged.connect(self.on_colormap_changed) + + # Connect auto levels checkbox + if hasattr(self, 'cbAutoLevels'): + self.cbAutoLevels.toggled.connect(self.on_auto_levels_toggled) + + # Connect frame navigation controls from UI + if hasattr(self, 'btn_prev_frame'): + self.btn_prev_frame.clicked.connect(self.previous_frame) + if hasattr(self, 'btn_next_frame'): + self.btn_next_frame.clicked.connect(self.next_frame) + if hasattr(self, 'frame_spinbox'): + self.frame_spinbox.valueChanged.connect(self.on_frame_spinbox_changed) + + # Connect new speckle analysis controls + if hasattr(self, 'cbLogScale'): + self.cbLogScale.toggled.connect(self.on_log_scale_toggled) + + if hasattr(self, 'sbVmin'): + self.sbVmin.valueChanged.connect(self.on_vmin_changed) + + if hasattr(self, 'sbVmax'): + self.sbVmax.valueChanged.connect(self.on_vmax_changed) + + if hasattr(self, 'btnDrawROI'): + self.btnDrawROI.clicked.connect(self.on_draw_roi_clicked) + + if hasattr(self, 'sbRefFrame'): + self.sbRefFrame.valueChanged.connect(self.on_ref_frame_changed) + + if hasattr(self, 'sbOtherFrame'): + self.sbOtherFrame.valueChanged.connect(self.on_other_frame_changed) + + + + + + # Playback controls for 3D stacks in 2D viewer (UI-defined or created programmatically) + try: + # Ensure playback timer exists + if not hasattr(self, 'play_timer') or self.play_timer is None: + self.play_timer = QTimer(self) + try: + self.play_timer.timeout.connect(self._advance_frame_playback) + print("[PLAYBACK] Created play_timer and wired timeout") + except Exception as e: + print(f"[PLAYBACK] ERROR wiring timer: {e}") + + # Wire controls if present in UI + if hasattr(self, 'btn_play'): + try: + self.btn_play.clicked.connect(self.start_playback) + print("[PLAYBACK] Wired btn_play -> start_playback") + except Exception as e: + print(f"[PLAYBACK] ERROR wiring btn_play: {e}") + if hasattr(self, 'btn_pause'): + try: + self.btn_pause.clicked.connect(self.pause_playback) + 
print("[PLAYBACK] Wired btn_pause -> pause_playback") + except Exception as e: + print(f"[PLAYBACK] ERROR wiring btn_pause: {e}") + if hasattr(self, 'sb_fps'): + try: + self.sb_fps.valueChanged.connect(self.on_fps_changed) + print("[PLAYBACK] Wired sb_fps -> on_fps_changed") + except Exception as e: + print(f"[PLAYBACK] ERROR wiring sb_fps: {e}") + # cb_auto_replay is read in _advance_frame_playback; no signal wiring needed + + # Default disabled; enabled when 3D data with >3 frames is loaded + try: + if hasattr(self, 'btn_play'): + self.btn_play.setEnabled(False) + if hasattr(self, 'btn_pause'): + self.btn_pause.setEnabled(False) + if hasattr(self, 'sb_fps'): + self.sb_fps.setEnabled(False) + if hasattr(self, 'cb_auto_replay'): + self.cb_auto_replay.setEnabled(False) + try: + # Select auto replay by default + self.cb_auto_replay.setChecked(True) + except Exception: + pass + except Exception: + pass + except Exception: + pass + + + + except Exception as e: + self.update_status(f"Error setting up 2D connections: {e}") + + def setup_1d_workspace(self): + """Set up the 1D workspace with PyQtGraph PlotItem.""" + try: + self.plot_item_1d = pg.PlotItem() + self.plot_widget_1d = pg.PlotWidget(plotItem=self.plot_item_1d) + self.plot_item_1d.setLabel('bottom', 'Index') + self.plot_item_1d.setLabel('left', 'Value') + if hasattr(self, 'layout1DPlotHost'): + self.layout1DPlotHost.addWidget(self.plot_widget_1d) + else: + print("Warning: layout1DPlotHost not found, 1D plot may not display correctly") + self.clear_1d_plot() + # Setup 1D controls connections (delegated) + self.controls_1d.setup() + except Exception as e: + self.update_status(f"Error setting up 1D workspace: {e}") + + # === Controls: 1D === + def setup_controls_1d(self): + """Set up connections for the 1D controls.""" + try: + # Placeholder for future 1D controls (e.g., levels, scale, etc.) 
+ pass + except Exception as e: + self.update_status(f"Error setting up 1D connections: {e}") + + # === Controls: 3D === + def setup_controls_3d(self): + pass + + def setup_2d_file_display(self): + """Set up the 2D file information display in the main workspace.""" + from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QLabel, QTextEdit, QGroupBox, QTabWidget, QSplitter + from PyQt5.QtCore import Qt + + # Create a vertical splitter for the workspace + self.workspace_splitter = QSplitter(Qt.Vertical) + + # Create top section for main workspace + self.main_workspace_widget = QGroupBox() + self.main_workspace_layout = QVBoxLayout(self.main_workspace_widget) + + # File status label at the top + self.file_status_label = QLabel("No HDF5 file loaded") + self.file_status_label.setStyleSheet("font-size: 14px; font-weight: bold; color: #2c3e50; padding: 10px;") + self.file_status_label.setAlignment(Qt.AlignCenter) + self.main_workspace_layout.addWidget(self.file_status_label) + + # Add a spacer to push content to top + from PyQt5.QtWidgets import QSpacerItem, QSizePolicy + spacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding) + self.main_workspace_layout.addItem(spacer) + + # Create bottom section for tabs (compact) + self.info_tabs_widget = QGroupBox() + self.info_tabs_layout = QVBoxLayout(self.info_tabs_widget) + self.info_tabs_layout.setContentsMargins(6, 6, 6, 6) + + # Create tab widget with compact size + self.info_tabs = QTabWidget() + self.info_tabs.setMaximumHeight(150) # Limit height to make it compact + self.info_tabs.setStyleSheet(""" + QTabWidget::pane { + border: 1px solid #dee2e6; + border-radius: 4px; + background-color: #f8f9fa; + } + QTabBar::tab { + background-color: #e9ecef; + border: 1px solid #dee2e6; + padding: 6px 12px; + margin-right: 2px; + font-size: 9pt; + } + QTabBar::tab:selected { + background-color: #f8f9fa; + border-bottom: 1px solid #f8f9fa; + } + """) + + # Dataset Information Tab + self.dataset_info_text = QTextEdit() + self.dataset_info_text.setStyleSheet(""" + QTextEdit { + background-color: #f8f9fa; + border: none; + padding: 6px; + font-family: 'Consolas', monospace; + font-size: 9pt; + } + """) + self.dataset_info_text.setReadOnly(True) + self.dataset_info_text.setPlainText("Select a dataset from the tree to view detailed information.") + + # File Information Tab + self.file_info_text = QTextEdit() + self.file_info_text.setStyleSheet(""" + QTextEdit { + background-color: #f8f9fa; + border: none; + padding: 6px; + font-family: 'Consolas', monospace; + font-size: 9pt; + } + """) + self.file_info_text.setReadOnly(True) + self.file_info_text.setPlainText("Load an HDF5 file to view file information.") + + # Add tabs + self.info_tabs.addTab(self.dataset_info_text, "Dataset Info") + self.info_tabs.addTab(self.file_info_text, "File Info") + + # Add tab widget to bottom container + self.info_tabs_layout.addWidget(self.info_tabs) + + # Add widgets to splitter + self.workspace_splitter.addWidget(self.main_workspace_widget) + self.workspace_splitter.addWidget(self.info_tabs_widget) + + # Set splitter sizes (85% for main workspace, 15% for tabs) + self.workspace_splitter.setSizes([850, 150]) + + # Add the splitter to the analysis layout + self.analysisLayout.addWidget(self.workspace_splitter) + + # === Supers: BaseWindow overrides === + def get_file_filters(self): + """ + Get file filters for HDF5 files. 
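+        QFileDialog preselects the first filter group, so HDF5 files are
+        shown by default.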
+ + Returns: + str: File filter string for QFileDialog + """ + return "HDF5 Files (*.h5 *.hdf5);;All Files (*)" + + # def load_file_content(self, file_path): + # """ + # Load HDF5 file content and add it to the top of the data tree. + + # Args: + # file_path (str): Path to the HDF5 file to load + # """ + # try: + # # Update UI to show loading state + # self.update_status(f"Loading: {os.path.basename(file_path)}") + + # # Store the current file path + # self.current_file_path = file_path + + # # Update file info display + # self.update_file_info_display(file_path) + + # # Check if this file is already loaded to avoid duplicates + # if hasattr(self, 'tree_data'): + # for i in range(self.tree_data.topLevelItemCount()): + # existing_item = self.tree_data.topLevelItem(i) + # existing_path = existing_item.data(0, Qt.UserRole + 1) + # if existing_path == file_path: + # # File already loaded, just select it and return + # self.tree_data.setCurrentItem(existing_item) + # self.update_status(f"File already loaded: {os.path.basename(file_path)}") + # return + + # # Clear any existing visualizations + # self.clear_2d_plot() + # self.clear_3d_plot() + + # # Reset selected dataset path + # self.selected_dataset_path = None + + # # Open and read HDF5 file + # with h5py.File(file_path, 'r') as h5file: + # # Create root item + # root_item = QTreeWidgetItem([os.path.basename(file_path)]) + # root_item.setData(0, Qt.UserRole + 1, file_path) # Store file path + # root_item.setData(0, Qt.UserRole + 2, "file_root") # Mark as file root + + # # Insert at the top (index 0) instead of adding to the end + # self.tree_data.insertTopLevelItem(0, root_item) + + # # Recursively populate tree + # self._populate_tree_recursive(h5file, root_item) + + # # Keep the root item collapsed by default + # root_item.setExpanded(False) + + # # Select the newly added item + # self.tree_data.setCurrentItem(root_item) + + # # Update workspace displays + # if hasattr(self, 'file_status_label'): + # self.file_status_label.setText(f"HDF5 file loaded: {os.path.basename(file_path)}") + # if hasattr(self, 'dataset_info_text'): + # self.dataset_info_text.setPlainText("Select a dataset from the tree to view detailed information.") + + # self.update_status("HDF5 File Loaded Successfully") + + # except Exception as e: + # QMessageBox.critical(self, "Error", f"Failed to load HDF5 file: {str(e)}") + # self.update_status("Failed to load file") + + def save_file_content(self, file_path): + """ + Save analysis results. + + Args: + file_path (str): Path to save the analysis to + """ + try: + self.update_status(f"Saving: {os.path.basename(file_path)}") + + # TODO: Implement actual save functionality + # This would involve: + # 1. Collecting current analysis state + # 2. Saving results to HDF5 or other format + # 3. Saving workspace configuration + + # Simulate save delay + QTimer.singleShot(1000, lambda: self.update_status("Analysis Saved Successfully")) + + except Exception as e: + QMessageBox.critical(self, "Error", f"Failed to save analysis: {str(e)}") + self.update_status("Failed to save file") + + def load_folder_content(self, folder_path): + """ + Load all HDF5 files from a folder and organize them under a folder section. 
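+        Files matching *.h5 / *.hdf5 (either case) are grouped under a single
+        collapsible folder section; files that fail to load are skipped.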
+ + Args: + folder_path (str): Path to the folder containing HDF5 files + """ + try: + # Update UI to show loading state + folder_name = os.path.basename(folder_path) + self.update_status(f"Loading folder: {folder_name}") + + # Check if this folder is already loaded to avoid duplicates + if hasattr(self, 'tree_data'): + for i in range(self.tree_data.topLevelItemCount()): + existing_item = self.tree_data.topLevelItem(i) + existing_type = existing_item.data(0, Qt.UserRole + 2) + existing_path = existing_item.data(0, Qt.UserRole + 1) + if existing_type == "folder_section" and existing_path == folder_path: + # Folder already loaded, just select it and return + self.tree_data.setCurrentItem(existing_item) + self.update_status(f"Folder already loaded: {folder_name}") + return + + # Find all HDF5 files in the folder + h5_patterns = ['*.h5', '*.hdf5', '*.H5', '*.HDF5'] + h5_files = [] + for pattern in h5_patterns: + h5_files.extend(glob.glob(os.path.join(folder_path, pattern))) + + if not h5_files: + QMessageBox.information(self, "No Files", "No HDF5 files found in the selected folder.") + self.update_status("No HDF5 files found") + return + + # Sort files for consistent ordering + h5_files.sort() + + # Create folder section header at the top + folder_section_item = QTreeWidgetItem([f"📁 {folder_name} ({len(h5_files)} files)"]) + folder_section_item.setData(0, Qt.UserRole + 1, folder_path) # Store folder path + folder_section_item.setData(0, Qt.UserRole + 2, "folder_section") # Mark as folder section + + # Insert at the top (index 0) + self.tree_data.insertTopLevelItem(0, folder_section_item) + + # Load each HDF5 file under the folder section + loaded_count = 0 + for file_path in h5_files: + try: + self.load_single_h5_file_under_section(file_path, folder_section_item) + loaded_count += 1 + except Exception as e: + self.update_status(f"Failed to load {file_path}: {e}") + continue + + # Expand the folder section to show the files + folder_section_item.setExpanded(True) + + # Select the folder section + self.tree_data.setCurrentItem(folder_section_item) + + # Clear any existing visualizations + self.clear_2d_plot() + + # Update workspace displays + if hasattr(self, 'file_status_label'): + self.file_status_label.setText(f"Folder loaded: {folder_name} ({loaded_count} files)") + if hasattr(self, 'dataset_info_text'): + self.dataset_info_text.setPlainText("Select a dataset from the tree to view detailed information.") + + self.update_status(f"Loaded {loaded_count} HDF5 files from folder: {folder_name}") + + except Exception as e: + QMessageBox.critical(self, "Error", f"Failed to load folder: {str(e)}") + self.update_status("Failed to load folder") + + # === Load utilities === + def _populate_tree_recursive(self, h5_group, parent_item): + """ + Recursively populate the tree widget with HDF5 structure. 
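+        Groups recurse into child items; datasets are labeled with their shape
+        and dtype, and renderable datasets are highlighted in blue.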
+
+        Args:
+            h5_group: HDF5 group or file object
+            parent_item: QTreeWidgetItem to add children to
+        """
+        for key in h5_group.keys():
+            item = h5_group[key]
+
+            # Create tree item
+            tree_item = QTreeWidgetItem([key])
+            parent_item.addChild(tree_item)
+
+            # Store the full path as item data
+            full_path = item.name
+            tree_item.setData(0, Qt.UserRole, full_path)  # Qt.UserRole == 32
+
+            if isinstance(item, h5py.Group):
+                # It's a group, add group indicator and recurse
+                tree_item.setText(0, f"{key} (Group)")
+                self._populate_tree_recursive(item, tree_item)
+            elif isinstance(item, h5py.Dataset):
+                # It's a dataset, show shape and dtype info
+                shape_str = f"{item.shape}" if item.shape else "scalar"
+                dtype_str = str(item.dtype)
+                tree_item.setText(0, f"{key} (Dataset: {shape_str}, {dtype_str})")
+                # Color renderable datasets blue (similar to speckle_thing visual hint)
+                if self.is_dataset_renderable(item):
+                    tree_item.setForeground(0, QBrush(QColor('blue')))
+                    tree_item.setData(0, Qt.UserRole + 3, True)
+
+    def load_single_h5_file(self, file_path):
+        """
+        Load a single HDF5 file and add it to the tree.
+
+        Args:
+            file_path (str): Path to the HDF5 file
+        """
+        with h5py.File(file_path, 'r') as h5file:
+            # Create root item for this file
+            root_item = QTreeWidgetItem([os.path.basename(file_path)])
+            root_item.setData(0, Qt.UserRole + 1, file_path)  # Store file path for removal
+            root_item.setData(0, Qt.UserRole + 2, "file_root")  # Mark as file root
+            self.tree_data.addTopLevelItem(root_item)
+
+            # Recursively populate tree
+            self._populate_tree_recursive(h5file, root_item)
+
+            # Keep the root item collapsed by default
+            root_item.setExpanded(False)
+
+    def load_single_h5_file_under_section(self, file_path, parent_section):
+        """
+        Load a single HDF5 file and add it under a folder section.
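+        The file's root item is added collapsed beneath parent_section.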
+ + Args: + file_path (str): Path to the HDF5 file + parent_section (QTreeWidgetItem): Parent folder section item + """ + with h5py.File(file_path, 'r') as h5file: + # Create root item for this file under the section + root_item = QTreeWidgetItem([os.path.basename(file_path)]) + root_item.setData(0, Qt.UserRole + 1, file_path) # Store file path + root_item.setData(0, Qt.UserRole + 2, "file_root") # Mark as file root + parent_section.addChild(root_item) + + # Recursively populate tree + self._populate_tree_recursive(h5file, root_item) + + # Keep the root item collapsed by default + root_item.setExpanded(False) + + def is_dataset_renderable(self, dset): + """Return True if dataset is numeric and can be rendered (2D/3D, or 1D perfect square).""" + try: + dtype = dset.dtype + ndim = len(dset.shape) + if np.issubdtype(dtype, np.number): + if ndim >= 2: + return True + if ndim == 1: + size = dset.size + if size >= 100: + side = int(np.sqrt(size)) + return side * side == size + return False + except Exception: + return False + + # Async dataset loader to prevent UI freeze + class DatasetLoader(QObject): + loaded = pyqtSignal(object) # numpy array + failed = pyqtSignal(str) + + def __init__(self, file_path, dataset_path, max_frames=100): + super().__init__() + self.file_path = file_path + self.dataset_path = dataset_path + self.max_frames = max_frames + + @pyqtSlot() + def run(self): + try: + import h5py, numpy as np + with h5py.File(self.file_path, 'r') as h5file: + if self.dataset_path not in h5file: + self.failed.emit("Dataset not found") + return + dset = h5file[self.dataset_path] + if not isinstance(dset, h5py.Dataset): + self.failed.emit("Selected item is not a dataset") + return + + # Efficient loading to avoid blocking on huge datasets + if len(dset.shape) == 3: + max_frames = min(self.max_frames, dset.shape[0]) + data = dset[:max_frames] + else: + # Guard against extremely large 2D datasets by center cropping + try: + estimated_size = dset.size * dset.dtype.itemsize + except Exception: + estimated_size = 0 + if len(dset.shape) == 2 and estimated_size > 512 * 1024 * 1024: # >512MB + h, w = dset.shape + ch = min(h, 2048) + cw = min(w, 2048) + y0 = max(0, (h - ch) // 2) + x0 = max(0, (w - cw) // 2) + data = dset[y0:y0+ch, x0:x0+cw] + else: + data = dset[...] + + data = np.asarray(data, dtype=np.float32) + # Clean high values + high_mask = data > 5e6 + if np.any(high_mask): + data[high_mask] = 0 + + # 1D handling: emit raw 1D data for dedicated 1D view + if data.ndim == 1: + # keep as 1D; no failure + pass + + self.loaded.emit(data) + except Exception as e: + self.failed.emit(f"Error loading dataset: {e}") + + def load_dataset_robustly(self, dataset): + """ + Load dataset with robust error handling and data cleaning like speckle_thing.py + + Args: + dataset: h5py.Dataset object + + Returns: + numpy.ndarray: Cleaned and processed data, or None if loading failed + """ + try: + self.update_status("Loading dataset...") + + # Load the data + if len(dataset.shape) == 3: + # For 3D datasets, load a reasonable number of frames (limit to 100 for memory) + max_frames = min(100, dataset.shape[0]) + if max_frames < dataset.shape[0]: + self.update_status(f"Loading first {max_frames} frames of {dataset.shape[0]} total frames") + data = dataset[:max_frames] + else: + # For 2D datasets, load all data + data = dataset[...] 
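+            # Note: `data` is now an in-memory numpy copy (the full 2D array, or
+            # the first max_frames frames of a 3D stack); the cleaning steps
+            # below mutate this copy only, never the file on disk.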
+ + # Convert to float32 for consistent processing + data = np.asarray(data, dtype=np.float32) + + # Clean up data - set all values above 5e6 to zero (like speckle_thing.py) + self.update_status("Cleaning data (setting values > 5e6 to zero)...") + high_values_mask = data > 5e6 + if np.any(high_values_mask): + num_cleaned = np.count_nonzero(high_values_mask) + data[high_values_mask] = 0 + print(f"Cleaned {num_cleaned} pixels with values > 5e6") + + # Check for valid data + if data.size == 0: + self.update_status("Error: Dataset is empty") + return None + + # Check for all-zero data + if np.all(data == 0): + self.update_status("Warning: All data values are zero") + + # 1D data: if perfect square and reasonably large, reshape to 2D; otherwise keep as 1D for 1D view + if data.ndim == 1: + side_length = int(np.sqrt(data.size)) + if data.size >= 100 and side_length * side_length == data.size: + data = data.reshape(side_length, side_length) + self.update_status(f"Reshaped 1D data to {side_length}x{side_length}") + else: + self.update_status(f"Loaded 1D data of length {data.size}") + return data + + self.update_status(f"Successfully loaded data with shape {data.shape}") + return data + + except Exception as e: + error_msg = f"Error loading dataset robustly: {str(e)}" + self.update_status(error_msg) + print(error_msg) + import traceback + traceback.print_exc() + return None + + def visualize_selected_dataset(self): + """Load and plot the selected dataset; clear/render ROIs depending on dataset type (image vs ROI).""" + if not self.current_file_path or not self.selected_dataset_path: + print("[DEBUG] visualize_selected_dataset: no current_file_path or selected_dataset_path") + return + try: + print(f"[DEBUG] visualize_selected_dataset: selected_dataset_path={self.selected_dataset_path}") + sel_path = str(self.selected_dataset_path) + is_image_data = sel_path.endswith("/entry/data/data") or sel_path == "/entry/data/data" or sel_path.endswith("entry/data/data") + is_roi_data = sel_path.startswith("/entry/data/rois") or "/entry/data/rois/" in sel_path + + # Always clear existing ROI graphics before switching + try: + self.roi_manager.clear_all_rois() + except Exception: + pass + + if is_image_data: + # Load original image dataset via HDF5Loader (preferred) + use_h5loader = True + valid = self.h5loader.validate_file(self.current_file_path) + print(f"[DEBUG] HDF5Loader.validate_file -> {valid}") + if not valid: + self.update_status(f"HDF5 validation failed: {self.h5loader.get_last_error()}") + return + volume, vol_shape = self.h5loader.load_h5_volume_3d(self.current_file_path) + print(f"[DEBUG] HDF5Loader.load_h5_volume_3d shape={getattr(volume,'shape',None)}") + if volume is None or volume.size == 0: + self.update_status("No data in /entry/data/data") + return + data = volume + # Display image data + self.display_2d_data(data) + if hasattr(self, 'tabWidget_analysis'): + self.tabWidget_analysis.setCurrentIndex(0) + # Render ROIs associated with this dataset + try: + self.roi_manager.render_rois_for_dataset(self.current_file_path, '/entry/data/data') + except Exception: + pass + elif is_roi_data: + # When clicking on an ROI dataset, clear existing ROI boxes and render the ROI dataset itself as the image + with h5py.File(self.current_file_path, 'r') as h5f: + exists = self.selected_dataset_path in h5f + print(f"[DEBUG] ROI dataset exists in file? 
{exists}") + if not exists: + self.update_status("ROI dataset not found") + return + dset = h5f[self.selected_dataset_path] + if not isinstance(dset, h5py.Dataset): + self.update_status("Selected ROI item is not a dataset") + return + data = np.asarray(dset[...], dtype=np.float32) + # Display ROI-only data (2D or 3D with frames) + if data.ndim >= 2 and np.issubdtype(data.dtype, np.number): + self.display_2d_data(data) + if hasattr(self, 'tabWidget_analysis'): + self.tabWidget_analysis.setCurrentIndex(0) + # No ROI overlays when viewing ROI-only dataset + self.update_status(f"Loaded ROI dataset: {self.selected_dataset_path}") + else: + # Non-visualizable + self.clear_2d_plot() + self.update_status("ROI dataset loaded but not visualizable") + else: + # Fallback: open generic dataset directly + with h5py.File(self.current_file_path, 'r') as h5file: + exists = self.selected_dataset_path in h5file + print(f"[DEBUG] Dataset exists in file? {exists}") + if not exists: + self.update_status("Dataset not found") + return + dataset = h5file[self.selected_dataset_path] + print(f"[DEBUG] Dataset type={type(dataset)} shape={getattr(dataset,'shape',None)}") + if not isinstance(dataset, h5py.Dataset): + self.update_status("Selected item is not a dataset") + return + data = self.load_dataset_robustly(dataset) + print(f"[DEBUG] load_dataset_robustly returned shape={getattr(data,'shape',None)}") + if data is None: + return + if data.ndim >= 2 and np.issubdtype(data.dtype, np.number): + self.display_2d_data(data) + if hasattr(self, 'tabWidget_analysis'): + self.tabWidget_analysis.setCurrentIndex(0) + # Render ROIs for this dataset if any + try: + self.roi_manager.render_rois_for_dataset(self.current_file_path, self.selected_dataset_path) + except Exception: + pass + elif data.ndim == 1: + self.display_1d_data(data) + self.update_status("Loaded 1D dataset") + else: + self.clear_2d_plot() + self.update_status("Dataset loaded but not visualizable") + except Exception as e: + error_msg = f"Error loading dataset: {str(e)}" + if hasattr(self, 'dataset_info_text'): + self.dataset_info_text.setPlainText(error_msg) + self.update_status(error_msg) + + def start_dataset_load(self): + """Create a worker thread to load dataset without blocking the UI.""" + try: + self.update_status(f"Loading dataset: {self.selected_dataset_path}") + self._dataset_thread = QThread() + self._dataset_worker = self.DatasetLoader(self.current_file_path, self.selected_dataset_path) + self._dataset_worker.moveToThread(self._dataset_thread) + self._dataset_thread.started.connect(self._dataset_worker.run) + self._dataset_worker.loaded.connect(self.on_dataset_loaded) + self._dataset_worker.failed.connect(self.on_dataset_failed) + # Ensure thread quits after work + self._dataset_worker.loaded.connect(self._dataset_thread.quit) + self._dataset_worker.failed.connect(self._dataset_thread.quit) + self._dataset_thread.start() + except Exception as e: + self.update_status(f"Error starting dataset load: {e}") + + @pyqtSlot(object) + def on_dataset_loaded(self, data): + """Handle dataset loaded event on main thread.""" + try: + # Visualize data + if data is None: + self.update_status("Loaded empty dataset") + return + if data.ndim >= 2 and np.issubdtype(data.dtype, np.number): + self.display_2d_data(data) + if hasattr(self, 'tabWidget_analysis'): + self.tabWidget_analysis.setCurrentIndex(0) + # Build info + info_lines = [] + info_lines.append(f"Dataset: {self.selected_dataset_path}") + # Read original shape/dtype quickly + try: + with 
h5py.File(self.current_file_path, 'r') as h5file: + dset = h5file[self.selected_dataset_path] + info_lines.append(f"Original Shape: {dset.shape}") + info_lines.append(f"Original Type: {dset.dtype}") + except Exception: + pass + info_lines.append(f"Loaded Shape: {data.shape}") + info_lines.append(f"Data Type: {data.dtype}") + info_lines.append(f"Size: {data.size:,} elements") + info_lines.append("\nData Statistics:") + info_lines.append(f"Min: {np.min(data):.6f}") + info_lines.append(f"Max: {np.max(data):.6f}") + info_lines.append(f"Mean: {np.mean(data):.6f}") + info_lines.append(f"Std: {np.std(data):.6f}") + # Memory usage + mem_size = data.size * data.dtype.itemsize + if mem_size < 1024: + mem_str = f"{mem_size} bytes" + elif mem_size < 1024 * 1024: + mem_str = f"{mem_size / 1024:.1f} KB" + elif mem_size < 1024 * 1024 * 1024: + mem_str = f"{mem_size / (1024 * 1024):.1f} MB" + else: + mem_str = f"{mem_size / (1024 * 1024 * 1024):.1f} GB" + info_lines.append(f"\nMemory Usage: {mem_str}") + info_text = "\n".join(info_lines) + if hasattr(self, 'dataset_info_text'): + self.dataset_info_text.setPlainText(info_text) + if hasattr(self, 'file_status_label'): + self.file_status_label.setText(f"Loaded: {os.path.basename(self.selected_dataset_path)}") + self.update_status(f"Loaded dataset: {self.selected_dataset_path}") + try: + if hasattr(self, 'info_2d_dock') and self.info_2d_dock is not None: + self.info_2d_dock.refresh() + except Exception: + pass + else: + if data.ndim == 1: + self.display_1d_data(data) + self.update_status("Loaded 1D dataset") + try: + if hasattr(self, 'info_2d_dock') and self.info_2d_dock is not None: + self.info_2d_dock.refresh() + except Exception: + pass + else: + self.clear_2d_plot() + self.update_status("Dataset loaded but not visualizable") + except Exception as e: + self.update_status(f"Error handling loaded dataset: {e}") + + @pyqtSlot(str) + def on_dataset_failed(self, message): + """Handle dataset load failure.""" + try: + if hasattr(self, 'dataset_info_text'): + self.dataset_info_text.setPlainText(message) + self.update_status(message) + try: + if hasattr(self, 'info_2d_dock') and self.info_2d_dock is not None: + self.info_2d_dock.refresh() + except Exception: + pass + except Exception as e: + self.update_status(f"Error updating failure status: {e}") + + def load_3d_data(self): + """Delegate 3D data loading to Tab3D.""" + try: + if hasattr(self, 'tab_3d') and self.tab_3d is not None: + self.tab_3d.load_data() + except Exception as e: + self.update_status(f"Error loading 3D data: {e}") + + # === 2D Helpers === + def set_2d_axes(self, x_axis, y_axis): + try: + self.axis_2d_x = str(x_axis) if x_axis else None + self.axis_2d_y = str(y_axis) if y_axis else None + if hasattr(self, 'info_2d_dock') and self.info_2d_dock is not None: + try: + self.info_2d_dock.refresh() + except Exception: + pass + except Exception: + pass + + def clear_2d_plot(self): + """Clear the 2D plot and show placeholder.""" + try: + if hasattr(self, 'image_view'): + # Create a small placeholder image + placeholder = np.zeros((100, 100), dtype=np.float32) + self.image_view.setImage(placeholder, autoLevels=False, autoRange=True) + + # Remove any existing ROIs + if hasattr(self, 'rois') and isinstance(self.rois, list): + for roi in self.rois: + try: + self.image_view.removeItem(roi) + except Exception: + pass + self.rois.clear() + self.current_roi = None + # Clear docked ROI list + if hasattr(self, 'roi_list') and self.roi_list is not None: + try: + self.roi_list.clear() + self.roi_by_item = {} + 
self.item_by_roi_id = {} + except Exception: + pass + # Clear ROI stats dock + if hasattr(self, 'roi_stats_table') and self.roi_stats_table is not None: + try: + self.roi_stats_table.setRowCount(0) + self.stats_row_by_roi_id = {} + except Exception: + pass + + # Set default axis labels + self.plot_item.setLabel('bottom', 'X') + self.plot_item.setLabel('left', 'Y') + try: + self.set_2d_axes("Columns", "Row") + except Exception: + pass + try: + if hasattr(self, 'info_2d_dock') and self.info_2d_dock is not None: + self.info_2d_dock.refresh() + except Exception: + pass + + # Update above-image info label with placeholder dimensions + if hasattr(self, 'image_info_label'): + try: + self.image_info_label.setText("Image Dimensions: 100x100 pixels") + except Exception: + pass + + # Remove hover overlays and clear HKL caches + try: + view = self.image_view.getView() if hasattr(self.image_view, 'getView') else None + if view is not None: + if hasattr(self, '_hover_hline') and self._hover_hline is not None: + try: + view.removeItem(self._hover_hline) + except Exception: + pass + self._hover_hline = None + if hasattr(self, '_hover_vline') and self._hover_vline is not None: + try: + view.removeItem(self._hover_vline) + except Exception: + pass + self._hover_vline = None + if hasattr(self, '_hover_text') and self._hover_text is not None: + try: + view.removeItem(self._hover_text) + except Exception: + pass + self._hover_text = None + self._mouse_proxy = None + self._qx_grid = None + self._qy_grid = None + self._qz_grid = None + except Exception: + pass + + except Exception as e: + self.update_status(f"Error clearing 2D plot: {e}") + + def update_overlay_text(self, width, height, frame_info=None): + """Update the label above the image with dimensions and optional frame info. + Augmented to append current motor position (if available) for the selected frame. 
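+        Example: "Image Dimensions: 512x512 pixels (Frame 0 of 100 | Motor 12.345678)"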
+ """ + try: + text = f"Image Dimensions: {width}x{height} pixels" + info = frame_info or "" + # Try to append motor position for current frame if 3D data + try: + if hasattr(self, 'current_2d_data') and self.current_2d_data is not None and self.current_2d_data.ndim == 3: + num_frames = int(self.current_2d_data.shape[0]) + idx = 0 + if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled(): + try: + idx = int(self.frame_spinbox.value()) + except Exception: + idx = 0 + motor_val = None + fp = getattr(self, 'current_file_path', None) + if fp and os.path.exists(fp): + try: + with h5py.File(fp, 'r') as h5f: + arr = self._find_motor_positions(h5f, num_frames) + if arr is not None and 0 <= idx < arr.size: + motor_val = float(arr[idx]) + except Exception: + motor_val = None + if motor_val is not None: + if info: + info = f"{info} | Motor {motor_val:.6f}" + else: + info = f"Motor {motor_val:.6f}" + except Exception: + pass + if info: + text = f"{text} ({info})" + if hasattr(self, 'image_info_label'): + self.image_info_label.setText(text) + except Exception as e: + self.update_status(f"Error updating image info label: {e}") + + def display_2d_data(self, data): + """Display 2D or 3D numeric data in the PyQtGraph ImageView.""" + try: + if not hasattr(self, 'image_view'): + print("Warning: ImageView not initialized") + return + + # Store the original data for frame navigation + self.current_2d_data = data + try: + print(f"[DISPLAY] data ndim={getattr(data,'ndim',None)}, shape={getattr(data,'shape',None)}") + except Exception: + pass + + # Handle different data dimensions + if data.ndim == 2: + # 2D data - display directly + image_data = np.asarray(data, dtype=np.float32) + + # Update frame controls for 2D data + self.update_frame_controls_for_2d_data() + + height, width = image_data.shape + if hasattr(self, 'frame_info_label'): + self.frame_info_label.setText(f"Image Dimensions: {width}x{height} pixels") + # Update overlay text + self.update_overlay_text(width, height, None) + + elif data.ndim == 3: + # 3D data - display first frame and set up navigation + image_data = np.asarray(data[0], dtype=np.float32) + + # Update frame controls for 3D data + num_frames = data.shape[0] + self.update_frame_controls_for_3d_data(num_frames) + + height, width = image_data.shape + if hasattr(self, 'frame_info_label'): + self.frame_info_label.setText(f"Image Dimensions: {width}x{height} pixels (frame 0 of {num_frames})") + # Update overlay text + self.update_overlay_text(width, height, f"Frame 0 of {num_frames}") + + else: + print(f"Unsupported data dimensions: {data.ndim}") + return + + # Set the image data + auto_levels = hasattr(self, 'cbAutoLevels') and self.cbAutoLevels.isChecked() + self.image_view.setImage( + image_data, + autoLevels=auto_levels, + autoRange=True, + autoHistogramRange=auto_levels + ) + # Ensure hover overlays exist after any prior clear + try: + self._setup_2d_hover() + except Exception: + pass + + # Update axis labels based on data shape + height, width = image_data.shape + self.plot_item.setLabel('bottom', f'Columns [pixels] (0 to {width-1})') + self.plot_item.setLabel('left', f'Row [pixels] (0 to {height-1})') + try: + self.set_2d_axes("Columns", "Row") + except Exception: + pass + + # Apply current colormap + if hasattr(self, 'cbColorMapSelect_2d'): + current_colormap = self.cbColorMapSelect_2d.currentText() + self.apply_colormap(current_colormap) + + # Update speckle analysis controls programmatically + self.update_speckle_controls_for_data(data) + + # Update vmin/vmax controls 
based on data + self.update_vmin_vmax_controls_for_data(image_data) + + # Refresh ROI stats for current frame/data + try: + self.roi_manager.update_all_roi_stats() + except Exception: + pass + try: + if hasattr(self, 'info_2d_dock') and self.info_2d_dock is not None: + self.info_2d_dock.refresh() + except Exception: + pass + # Refresh any open ROI Plot docks to reflect dataset change (axes and series) + try: + docks = [] + if hasattr(self, '_roi_plot_dock_widgets') and self._roi_plot_dock_widgets: + docks.extend(list(self._roi_plot_dock_widgets)) + if hasattr(self, 'roi_plot_docks_by_roi_id') and self.roi_plot_docks_by_roi_id: + for lst in self.roi_plot_docks_by_roi_id.values(): + docks.extend(list(lst)) + for d in docks: + try: + if hasattr(d, 'refresh_for_dataset_change'): + d.refresh_for_dataset_change() + except Exception: + continue + except Exception: + pass + + except Exception as e: + self.update_status(f"Error displaying 2D data: {e}") + + def _show_image_context_menu(self, pos): + """Show right-click menu for the 2D image with hover toggle and HKL plotting.""" + try: + menu = QMenu(self) + # Enable/Disable Hover + action_hover = QAction("Enable Hover", self) + action_hover.setCheckable(True) + action_hover.setChecked(bool(getattr(self, '_hover_enabled', True))) + action_hover.toggled.connect(self._toggle_hover_enabled) + menu.addAction(action_hover) + # Show current hover state explicitly (do not remove original options) + try: + state_text = "Hover: ON" if bool(getattr(self, '_hover_enabled', True)) else "Hover: OFF" + except Exception: + state_text = "Hover: ON" + action_state = QAction(state_text, self) + action_state.setEnabled(False) + menu.addAction(action_state) + # Show last HKL value if available (disabled info item) + hkl_label = "HKL: N/A" + try: + xy = getattr(self, '_last_hover_xy', None) + qxg = getattr(self, '_qx_grid', None) + qyg = getattr(self, '_qy_grid', None) + qzg = getattr(self, '_qz_grid', None) + if xy and qxg is not None and qyg is not None and qzg is not None: + x, y = int(xy[0]), int(xy[1]) + if qxg.ndim == 3 and qyg.ndim == 3 and qzg.ndim == 3: + idx = 0 + try: + idx = int(self.frame_spinbox.value()) if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled() else 0 + except Exception: + idx = 0 + if 0 <= idx < qxg.shape[0]: + H = float(qxg[idx, y, x]); K = float(qyg[idx, y, x]); L = float(qzg[idx, y, x]) + hkl_label = f"HKL: H={H:.6f}, K={K:.6f}, L={L:.6f}" + elif qxg.ndim == 2 and qyg.ndim == 2 and qzg.ndim == 2: + H = float(qxg[y, x]); K = float(qyg[y, x]); L = float(qzg[y, x]) + hkl_label = f"HKL: H={H:.6f}, K={K:.6f}, L={L:.6f}" + except Exception: + pass + action_hkl_info = QAction(hkl_label, self) + action_hkl_info.setEnabled(False) + menu.addAction(action_hkl_info) + # Plot HKL in 3D + action_plot_hkl = QAction("Plot HKL (3D)", self) + action_plot_hkl.setToolTip("Plot current frame intensity at HKL (qx,qy,qz) points") + action_plot_hkl.triggered.connect(self._plot_current_hkl_points) + menu.addAction(action_plot_hkl) + # Show menu at global position + try: + gpos = self.image_view.mapToGlobal(pos) + except Exception: + gpos = QCursor.pos() + menu.exec_(gpos) + except Exception as e: + self.update_status(f"Error showing image context menu: {e}") + + def _toggle_hover_enabled(self, enabled: bool): + try: + self._hover_enabled = bool(enabled) + self._update_hover_visibility() + except Exception: + pass + + def _update_hover_visibility(self): + try: + visible = bool(getattr(self, '_hover_enabled', True)) + for item_name in 
['_hover_hline', '_hover_vline', '_hover_text']: + it = getattr(self, item_name, None) + try: + if it is not None: + it.setVisible(visible) + except Exception: + pass + except Exception: + pass + + def _update_hover_text_at(self, x: int, y: int): + """Update hover crosshair and tooltip for given pixel coordinates on current frame.""" + try: + frame = self.get_current_frame_data() + if frame is None or frame.ndim != 2: + return + height, width = frame.shape + if x < 0 or y < 0 or x >= width or y >= height: + return + # Update crosshair positions + try: + if hasattr(self, '_hover_hline') and self._hover_hline is not None: + self._hover_hline.setPos(float(y)) + if hasattr(self, '_hover_vline') and self._hover_vline is not None: + self._hover_vline.setPos(float(x)) + except Exception: + pass + # Intensity + try: + intensity = float(frame[x, y]) + except Exception: + intensity = float('nan') + # HKL text + hkl_str = "" + try: + qxg = getattr(self, '_qx_grid', None) + qyg = getattr(self, '_qy_grid', None) + qzg = getattr(self, '_qz_grid', None) + if qxg is not None and qyg is not None and qzg is not None: + if qxg.ndim == 3 and qyg.ndim == 3 and qzg.ndim == 3: + idx = 0 + try: + idx = int(self.frame_spinbox.value()) if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled() else 0 + except Exception: + idx = 0 + if 0 <= idx < qxg.shape[0]: + H = float(qxg[idx, y, x]); K = float(qyg[idx, y, x]); L = float(qzg[idx, y, x]) + hkl_str = f" | H={H:.6f}, K={K:.6f}, L={L:.6f}" + elif qxg.ndim == 2 and qyg.ndim == 2 and qzg.ndim == 2: + H = float(qxg[y, x]); K = float(qyg[y, x]); L = float(qzg[y, x]) + hkl_str = f" | H={H:.6f}, K={K:.6f}, L={L:.6f}" + except Exception: + hkl_str = "" + # Tooltip text removed; keep crosshair only + try: + if hasattr(self, '_hover_text') and self._hover_text is not None: + self._hover_text.setVisible(False) + except Exception: + pass + # Update 2D Info dock Mouse section even during playback + try: + if hasattr(self, 'info_2d_dock') and self.info_2d_dock is not None: + H_val = K_val = L_val = None + try: + qxg = getattr(self, '_qx_grid', None) + qyg = getattr(self, '_qy_grid', None) + qzg = getattr(self, '_qz_grid', None) + if qxg is not None and qyg is not None and qzg is not None: + if qxg.ndim == 3 and qyg.ndim == 3 and qzg.ndim == 3: + idx = int(self.frame_spinbox.value()) if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled() else 0 + if 0 <= idx < qxg.shape[0]: + H_val = float(qxg[idx, y, x]); K_val = float(qyg[idx, y, x]); L_val = float(qzg[idx, y, x]) + elif qxg.ndim == 2 and qyg.ndim == 2 and qzg.ndim == 2: + H_val = float(qxg[y, x]); K_val = float(qyg[y, x]); L_val = float(qzg[y, x]) + except Exception: + H_val = K_val = L_val = None + self.info_2d_dock.set_mouse_info((x, y), intensity, H_val, K_val, L_val) + except Exception: + pass + except Exception: + pass + + def _plot_current_hkl_points(self): + """Plot current frame intensities at HKL positions in an HKL 3D Plot Dock.""" + try: + # Ensure q-grids are available + if getattr(self, '_qx_grid', None) is None or getattr(self, '_qy_grid', None) is None or getattr(self, '_qz_grid', None) is None: + try: + self._try_load_hkl_grids() + except Exception: + pass + qxg = getattr(self, '_qx_grid', None) + qyg = getattr(self, '_qy_grid', None) + qzg = getattr(self, '_qz_grid', None) + frame = self.get_current_frame_data() + if qxg is None or qyg is None or qzg is None or frame is None: + self.update_status("HKL grids or frame not available for plotting") + return + # Select frame index if 3D + 
+    def _plot_current_hkl_points(self):
+        """Plot current frame intensities at HKL positions in an HKL 3D Plot Dock."""
+        try:
+            # Ensure q-grids are available
+            if getattr(self, '_qx_grid', None) is None or getattr(self, '_qy_grid', None) is None or getattr(self, '_qz_grid', None) is None:
+                try:
+                    self._try_load_hkl_grids()
+                except Exception:
+                    pass
+            qxg = getattr(self, '_qx_grid', None)
+            qyg = getattr(self, '_qy_grid', None)
+            qzg = getattr(self, '_qz_grid', None)
+            frame = self.get_current_frame_data()
+            if qxg is None or qyg is None or qzg is None or frame is None:
+                self.update_status("HKL grids or frame not available for plotting")
+                return
+            # Select frame index if 3D
+            idx = 0
+            try:
+                idx = int(self.frame_spinbox.value()) if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled() else 0
+            except Exception:
+                idx = 0
+            # Extract H,K,L arrays matching frame
+            try:
+                if qxg.ndim == 3:
+                    qx = qxg[idx]; qy = qyg[idx]; qz = qzg[idx]
+                else:
+                    qx = qxg; qy = qyg; qz = qzg
+            except Exception:
+                self.update_status("Error extracting HKL arrays for current frame")
+                return
+            # Build points and intensities
+            try:
+                H = np.asarray(qx, dtype=np.float32).ravel()
+                K = np.asarray(qy, dtype=np.float32).ravel()
+                L = np.asarray(qz, dtype=np.float32).ravel()
+                points = np.column_stack([H, K, L])
+                intens = np.asarray(frame, dtype=np.float32).ravel()
+            except Exception:
+                self.update_status("Error building HKL points")
+                return
+            # Create or reuse HKL 3D Plot Dock
+            try:
+                from viewer.workbench.hkl_3d_plot_dock import HKL3DPlotDock
+            except Exception:
+                HKL3DPlotDock = None
+            if HKL3DPlotDock is None:
+                self.update_status("HKL3DPlotDock not available")
+                return
+            if not hasattr(self, '_hkl3d_plot_dock') or self._hkl3d_plot_dock is None:
+                dock_title = "HKL 3D Plot"
+                dock = HKL3DPlotDock(self, dock_title, self)
+                self.addDockWidget(Qt.RightDockWidgetArea, dock)
+                try:
+                    self.add_dock_toggle_action(dock, dock_title, segment_name="2d")
+                except Exception:
+                    pass
+                dock.show()
+                self._hkl3d_plot_dock = dock
+            # Plot points
+            try:
+                self._hkl3d_plot_dock._plot_points(points, intens)
+                self.update_status("Plotted HKL points for current frame")
+            except Exception as e:
+                self.update_status(f"Error plotting HKL points: {e}")
+        except Exception as e:
+            self.update_status(f"Error in HKL plot: {e}")
+
+    def _update_hkl3d_plot_for_current_frame(self):
+        """If HKL 3D plot dock is open, update it to current frame."""
+        try:
+            if hasattr(self, '_hkl3d_plot_dock') and self._hkl3d_plot_dock is not None:
+                self._plot_current_hkl_points()
+        except Exception:
+            pass
+
+    def _setup_2d_hover(self):
+        """Create crosshair and tooltip overlays, and connect mouse move events via SignalProxy."""
+        try:
+            if not hasattr(self, 'image_view') or self.image_view is None:
+                return
+            view = self.image_view.getView() if hasattr(self.image_view, 'getView') else None
+            if view is None:
+                return
+            # Create overlays only once
+            if not hasattr(self, '_hover_hline') or self._hover_hline is None:
+                try:
+                    self._hover_hline = pg.InfiniteLine(angle=0, movable=False, pen=pg.mkPen(color=(255, 255, 0, 150), width=1))
+                    self.plot_item.addItem(self._hover_hline)
+                    try:
+                        self._hover_hline.setZValue(1000)
+                    except Exception:
+                        pass
+                except Exception:
+                    self._hover_hline = None
+            if not hasattr(self, '_hover_vline') or self._hover_vline is None:
+                try:
+                    self._hover_vline = pg.InfiniteLine(angle=90, movable=False, pen=pg.mkPen(color=(255, 255, 0, 150), width=1))
+                    self.plot_item.addItem(self._hover_vline)
+                    try:
+                        self._hover_vline.setZValue(1000)
+                    except Exception:
+                        pass
+                except Exception:
+                    self._hover_vline = None
+            if not hasattr(self, '_hover_text') or self._hover_text is None:
+                try:
+                    self._hover_text = pg.TextItem("", color=(255, 255, 255))
+                    try:
+                        self._hover_text.setAnchor((0, 1))
+                    except Exception:
+                        pass
+                    self.plot_item.addItem(self._hover_text)
+                    try:
+                        self._hover_text.setZValue(1000)
+                    except Exception:
+                        pass
+                except Exception:
+                    self._hover_text = None
+            # Connect mouse move via SignalProxy to throttle updates
+            try:
+                vb = getattr(self.plot_item, 'vb', None)
+                scene = vb.scene() if vb is not None else self.plot_item.scene()
+                self._mouse_proxy = pg.SignalProxy(scene.sigMouseMoved, rateLimit=60, slot=self._on_2d_mouse_moved)
+            except Exception:
+                self._mouse_proxy = None
+        except Exception as e:
+            try:
+                self.update_status(f"Error setting up 2D hover: {e}")
+            except Exception:
+                pass
+
+    def _on_2d_mouse_moved(self, evt):
+        """Map scene coordinates to pixel indices; update crosshair and tooltip with intensity and HKL if available."""
+        try:
+            # evt may be (QPointF,) from SignalProxy
+            pos = evt[0] if isinstance(evt, (tuple, list)) and len(evt) > 0 else evt
+            if not hasattr(self, 'image_view') or self.image_view is None:
+                return
+            view = self.image_view.getView() if hasattr(self.image_view, 'getView') else None
+            image_item = getattr(self.image_view, 'imageItem', None)
+            if view is None or image_item is None:
+                return
+            # Map to data coordinates
+            try:
+                vb = getattr(self.plot_item, 'vb', None)
+                if vb is not None:
+                    mouse_point = vb.mapSceneToView(pos)
+                else:
+                    mouse_point = view.mapSceneToView(pos)
+            except Exception:
+                return
+            # Respect hover enabled flag
+            if not bool(getattr(self, '_hover_enabled', True)):
+                return
+            x = int(round(float(mouse_point.x())))
+            y = int(round(float(mouse_point.y())))
+            frame = self.get_current_frame_data()
+            if frame is None or frame.ndim != 2:
+                return
+            height, width = frame.shape
+            # Move crosshairs regardless, using float positions
+            try:
+                if hasattr(self, '_hover_hline') and self._hover_hline is not None:
+                    self._hover_hline.setPos(mouse_point.y())
+                if hasattr(self, '_hover_vline') and self._hover_vline is not None:
+                    self._hover_vline.setPos(mouse_point.x())
+            except Exception:
+                pass
+            if x < 0 or y < 0 or x >= width or y >= height:
+                return
+            # Remember last valid hover position
+            try:
+                self._last_hover_xy = (x, y)
+            except Exception:
+                pass
+            # Intensity at pixel (row-major: [y, x], consistent with _update_hover_text_at)
+            try:
+                intensity = float(frame[y, x])
+            except Exception:
+                intensity = float('nan')
+            # HKL from cached q-grids if present
+            hkl_str = ""
+            try:
+                qxg = getattr(self, '_qx_grid', None)
+                qyg = getattr(self, '_qy_grid', None)
+                qzg = getattr(self, '_qz_grid', None)
+                if qxg is not None and qyg is not None and qzg is not None:
+                    if qxg.ndim == 3 and qyg.ndim == 3 and qzg.ndim == 3:
+                        idx = 0
+                        try:
+                            if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled():
+                                idx = int(self.frame_spinbox.value())
+                        except Exception:
+                            idx = 0
+                        if 0 <= idx < qxg.shape[0]:
+                            H = float(qxg[idx, y, x])
+                            K = float(qyg[idx, y, x])
+                            L = float(qzg[idx, y, x])
+                            hkl_str = f" | H={H:.6f}, K={K:.6f}, L={L:.6f}"
+                    elif qxg.ndim == 2 and qyg.ndim == 2 and qzg.ndim == 2:
+                        H = float(qxg[y, x])
+                        K = float(qyg[y, x])
+                        L = float(qzg[y, x])
+                        hkl_str = f" | H={H:.6f}, K={K:.6f}, L={L:.6f}"
+            except Exception:
+                hkl_str = ""
+            # Update tooltip text near cursor
+            try:
+                if hasattr(self, '_hover_text') and self._hover_text is not None:
+                    # Hide hover text; keep crosshair only
+                    self._hover_text.setVisible(False)
+                # Update 2D Info dock Mouse section
+                try:
+                    if hasattr(self, 'info_2d_dock') and self.info_2d_dock is not None:
+                        # Re-derive H,K,L so the dock always gets fresh values
+                        H_val = K_val = L_val = None
+                        try:
+                            qxg = getattr(self, '_qx_grid', None)
+                            qyg = getattr(self, '_qy_grid', None)
+                            qzg = getattr(self, '_qz_grid', None)
+                            if qxg is not None and qyg is not None and qzg is not None:
+                                if qxg.ndim == 3 and qyg.ndim == 3 and qzg.ndim == 3:
+                                    idx = int(self.frame_spinbox.value()) if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled() else 0
+                                    if 0 <= idx < qxg.shape[0]:
+                                        H_val = float(qxg[idx, y, x]); K_val = float(qyg[idx, y, x]); L_val = float(qzg[idx, y, x])
+                                elif qxg.ndim == 2 and qyg.ndim == 2 and qzg.ndim == 2:
+                                    H_val = float(qxg[y, x]); K_val = float(qyg[y, x]); L_val = float(qzg[y, x])
+                        except Exception:
+                            H_val = K_val = L_val = None
+                        self.info_2d_dock.set_mouse_info((x, y), intensity, H_val, K_val, L_val)
+                except Exception:
+                    pass
+            except Exception:
+                pass
+        except Exception:
+            pass
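+    # The two methods above follow the usual pyqtgraph hover recipe:
+    # scene.sigMouseMoved -> SignalProxy (rateLimit) -> ViewBox.mapSceneToView -> array lookup.
+    # A minimal standalone sketch of that pattern, shown only for orientation
+    # (hypothetical `plot_item` / `img` names, not used by this class):
+    #
+    #     def on_moved(evt):
+    #         p = plot_item.vb.mapSceneToView(evt[0])
+    #         x, y = int(round(p.x())), int(round(p.y()))
+    #         if 0 <= y < img.shape[0] and 0 <= x < img.shape[1]:
+    #             print(f"({x}, {y}) -> {img[y, x]}")
+    #
+    #     proxy = pg.SignalProxy(plot_item.scene().sigMouseMoved, rateLimit=60, slot=on_moved)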
+    def _try_load_hkl_grids(self):
+        """Load and cache qx/qy/qz grids (supports 2D HxW and 3D FxHxW). Called after display_2d_data."""
+        try:
+            # Reset caches by default
+            self._qx_grid = None
+            self._qy_grid = None
+            self._qz_grid = None
+            if not getattr(self, 'current_file_path', None) or not getattr(self, 'selected_dataset_path', None):
+                return
+            with h5py.File(self.current_file_path, 'r') as h5f:
+                sel_path = str(self.selected_dataset_path)
+                parent_path = sel_path.rsplit('/', 1)[0] if '/' in sel_path else '/'
+                candidates = []
+                try:
+                    if parent_path in h5f:
+                        candidates.append(h5f[parent_path])
+                except Exception:
+                    pass
+                try:
+                    if '/entry/data' in h5f:
+                        candidates.append(h5f['/entry/data'])
+                except Exception:
+                    pass
+                qx = qy = qz = None
+
+                def find_in_group(g, name):
+                    for key in g.keys():
+                        try:
+                            if isinstance(g[key], h5py.Dataset) and key.lower() == name:
+                                return g[key]
+                        except Exception:
+                            pass
+                    return None
+
+                # Try strict names first
+                for g in candidates:
+                    if g is None:
+                        continue
+                    try:
+                        qx = find_in_group(g, 'qx')
+                        qy = find_in_group(g, 'qy')
+                        qz = find_in_group(g, 'qz')
+                    except Exception:
+                        qx = qy = qz = None
+                    if qx is not None and qy is not None and qz is not None:
+                        break
+                # Fallback: case-insensitive suffix match within parent group
+                if (qx is None or qy is None or qz is None) and parent_path in h5f:
+                    g = h5f[parent_path]
+                    for key in g.keys():
+                        try:
+                            if not isinstance(g[key], h5py.Dataset):
+                                continue
+                        except Exception:
+                            continue
+                        lk = key.lower()
+                        # Support additional naming conventions for HKL/q grids
+                        # (endswith('qx') already covers an exact 'qx' match)
+                        if lk.endswith('qx') or lk in ('q_x', 'qx_grid', 'qgrid_x', 'h'):
+                            qx = g[key]
+                        elif lk.endswith('qy') or lk in ('q_y', 'qy_grid', 'qgrid_y', 'k'):
+                            qy = g[key]
+                        elif lk.endswith('qz') or lk in ('q_z', 'qz_grid', 'qgrid_z', 'l'):
+                            qz = g[key]
+                # Last resort: scan the file's top-level datasets for qx/qy/qz-style names
+                if qx is None or qy is None or qz is None:
+                    for group in [h5f]:
+                        for key in group.keys():
+                            try:
+                                item = group[key]
+                                if not isinstance(item, h5py.Dataset):
+                                    continue
+                                lk = key.lower()
+                                if qx is None and (lk.endswith('qx') or lk in ('q_x', 'h')):
+                                    qx = item
+                                elif qy is None and (lk.endswith('qy') or lk in ('q_y', 'k')):
+                                    qy = item
+                                elif qz is None and (lk.endswith('qz') or lk in ('q_z', 'l')):
+                                    qz = item
+                            except Exception:
+                                continue
+                if qx is None or qy is None or qz is None:
+                    return
+                # Read arrays
+                try:
+                    qx_arr = np.asarray(qx[...], dtype=np.float32)
+                    qy_arr = np.asarray(qy[...], dtype=np.float32)
+                    qz_arr = np.asarray(qz[...], dtype=np.float32)
+                except Exception:
+                    return
+                frame = self.get_current_frame_data()
+                if frame is None or frame.ndim != 2:
+                    return
+                h, w = frame.shape
+                # Normalize shapes: transpose 2D grids if (w, h)
+                if qx_arr.ndim == 2 and qy_arr.ndim == 2 and qz_arr.ndim == 2:
+                    if qx_arr.shape == (w, h) and qy_arr.shape == (w, h) and qz_arr.shape == (w, h):
+                        try:
+                            qx_arr = qx_arr.T; qy_arr = qy_arr.T; qz_arr = qz_arr.T
+                        except Exception:
+                            pass
+                    if qx_arr.shape == (h, w) and qy_arr.shape == (h, w) and qz_arr.shape == (h, w):
+                        self._qx_grid = qx_arr
+                        self._qy_grid = qy_arr
+                        self._qz_grid = qz_arr
+                    else:
+                        return
+                elif qx_arr.ndim == 3 and qy_arr.ndim == 3 and qz_arr.ndim == 3:
+                    # Expect (F, H, W), but reorder axes if needed
+                    def reorder_to_fhw(arr, h, w):
+                        try:
+                            shp = arr.shape
+                            if len(shp) != 3:
+                                return None
+                            # Identify axes matching h and w
+                            idx_h = None; idx_w = None
+                            for i, d in enumerate(shp):
+                                if d == h and idx_h is None:
+                                    idx_h = i
+                            for i, d in enumerate(shp):
+                                if d == w and i != idx_h and idx_w is None:
+                                    idx_w = i
+                            if idx_h is None or idx_w is None:
+                                return None
+                            idx_f = [0, 1, 2]
+                            idx_f.remove(idx_h); idx_f.remove(idx_w)
+                            idx_f = idx_f[0]
+                            order = [idx_f, idx_h, idx_w]
+                            return np.transpose(arr, axes=order)
+                        except Exception:
+                            return None
+                    if not (qx_arr.shape[1:] == (h, w) and qy_arr.shape[1:] == (h, w) and qz_arr.shape[1:] == (h, w)):
+                        rqx = reorder_to_fhw(qx_arr, h, w)
+                        rqy = reorder_to_fhw(qy_arr, h, w)
+                        rqz = reorder_to_fhw(qz_arr, h, w)
+                        if rqx is not None and rqy is not None and rqz is not None:
+                            qx_arr, qy_arr, qz_arr = rqx, rqy, rqz
+                    if qx_arr.shape[1:] == (h, w) and qy_arr.shape[1:] == (h, w) and qz_arr.shape[1:] == (h, w):
+                        self._qx_grid = qx_arr
+                        self._qy_grid = qy_arr
+                        self._qz_grid = qz_arr
+                    else:
+                        return
+                else:
+                    return
+            try:
+                self.update_status("HKL q-grids loaded for hover")
+            except Exception:
+                pass
+            try:
+                self.set_2d_axes("h", "k")
+            except Exception:
+                pass
+            try:
+                if hasattr(self, 'info_2d_dock') and self.info_2d_dock is not None:
+                    self.info_2d_dock.refresh()
+            except Exception:
+                pass
+        except Exception as e:
+            try:
+                self.update_status(f"HKL q-grids load failed: {e}")
+            except Exception:
+                pass
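+    # reorder_to_fhw above normalizes arbitrarily ordered q-grids to (F, H, W) by
+    # locating the axes whose lengths equal the frame's H and W. Ambiguous cases
+    # (e.g. H == W, or a frame count equal to H) resolve to the first match, so
+    # unusually shaped files may still need manual curation. Illustrative call
+    # (hypothetical shapes only):
+    #
+    #     arr = np.zeros((512, 1024, 10))         # stored as (H, W, F)
+    #     arr_fhw = np.transpose(arr, (2, 0, 1))  # what reorder_to_fhw(arr, 512, 1024) produces
+    #     assert arr_fhw.shape == (10, 512, 1024)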
+    def clear_1d_plot(self):
+        """Clear the 1D plot."""
+        try:
+            if hasattr(self, 'plot_item_1d'):
+                self.plot_item_1d.clear()
+        except Exception as e:
+            self.update_status(f"Error clearing 1D plot: {e}")
+
+    def display_1d_data(self, data):
+        """Display 1D numeric data in the 1D View."""
+        try:
+            if not hasattr(self, 'plot_item_1d'):
+                print("Warning: 1D plot not initialized")
+                return
+            y = np.asarray(data, dtype=np.float32).ravel()
+            x = np.arange(len(y))
+            self.plot_item_1d.clear()
+            self.plot_item_1d.plot(x, y, pen='y')
+            # Switch to 1D view tab
+            if hasattr(self, 'tabWidget_analysis'):
+                for i in range(self.tabWidget_analysis.count()):
+                    if self.tabWidget_analysis.tabText(i) == "1D View":
+                        self.tabWidget_analysis.setCurrentIndex(i)
+                        break
+        except Exception as e:
+            self.update_status(f"Error displaying 1D data: {e}")
+
+    def on_colormap_changed(self, colormap_name):
+        """Handle colormap changes."""
+        try:
+            self.apply_colormap(colormap_name)
+        except Exception as e:
+            self.update_status(f"Error changing colormap: {e}")
+
+    def on_auto_levels_toggled(self, enabled):
+        """Handle auto levels toggle."""
+        try:
+            if hasattr(self, 'image_view') and hasattr(self.image_view, 'imageItem'):
+                if enabled:
+                    # Enable auto levels
+                    self.image_view.autoLevels()
+                # If disabled, keep current levels
+        except Exception as e:
+            self.update_status(f"Error toggling auto levels: {e}")
+
+    def apply_colormap(self, colormap_name):
+        """Apply a colormap to the image view."""
+        try:
+            if not hasattr(self, 'image_view'):
+                return
+
+            lut = None
+            # Try pyqtgraph ColorMap first
+            try:
+                if hasattr(pg, "colormap") and hasattr(pg.colormap, "get"):
+                    try:
+                        cmap = pg.colormap.get(colormap_name)
+                    except Exception:
+                        cmap = None
+                    if cmap is not None:
+                        lut = cmap.getLookupTable(nPts=256)
+            except Exception:
+                lut = None
+
+            # Fall back to matplotlib if needed
+            if lut is None:
+                try:
+                    import matplotlib.pyplot as plt
+                    mpl_cmap = plt.get_cmap(colormap_name)
+                    # Build LUT as uint8 Nx3
+                    xs = np.linspace(0.0, 1.0, 256, dtype=float)
+                    colors = mpl_cmap(xs, bytes=True)  # returns Nx4 uint8
+                    lut = colors[:, :3]
+                except Exception:
+                    # Last resort: grayscale
+                    xs = (np.linspace(0, 255, 256)).astype(np.uint8)
+                    lut = np.column_stack([xs, xs, xs])
+
+            # Apply the lookup table
+            try:
+                self.image_view.imageItem.setLookupTable(lut)
+            except Exception:
+                pass
+
+        except Exception as e:
+            self.update_status(f"Error applying colormap: {e}")
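+    # The fallback chain above ends in a hand-built 256x3 grayscale LUT; the same
+    # construction works for any ramp. Quick standalone check (numpy only, not
+    # executed by the application):
+    #
+    #     xs = np.linspace(0, 255, 256).astype(np.uint8)
+    #     lut = np.column_stack([xs, xs, xs])   # shape (256, 3), uint8
+    #     assert lut.shape == (256, 3) and lut[0, 0] == 0 and lut[-1, 0] == 255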
+    # === 3D Helpers ===
+    def _set_3d_overlay(self, text: str):
+        pass
+
+    def _debug_3d_state(self, tag: str = ""):
+        pass
+
+    def clear_3d_plot(self):
+        """Delegate 3D plot clearing to Tab3D."""
+        try:
+            if hasattr(self, 'tab_3d') and self.tab_3d is not None:
+                self.tab_3d.clear_plot()
+        except Exception as e:
+            self.update_status(f"Error clearing 3D plot: {e}")
+
+    def create_3d_from_2d(self, data):
+        pass
+
+    def create_3d_from_3d(self, data):
+        pass
+
+    def update_intensity_controls(self, data):
+        """Update the intensity control ranges based on data."""
+        try:
+            min_val = int(np.min(data))
+            max_val = int(np.max(data))
+
+            if hasattr(self, 'sb_min_intensity_3d'):
+                self.sb_min_intensity_3d.setRange(min_val - 1000, max_val + 1000)
+                self.sb_min_intensity_3d.setValue(min_val)
+
+            if hasattr(self, 'sb_max_intensity_3d'):
+                self.sb_max_intensity_3d.setRange(min_val - 1000, max_val + 1000)
+                self.sb_max_intensity_3d.setValue(max_val)
+
+        except Exception as e:
+            self.update_status(f"Error updating intensity controls: {e}")
+
+    def apply_3d_visibility_settings(self):
+        pass
+
+    def on_3d_colormap_changed(self, colormap_name):
+        """Delegate 3D colormap change to Tab3D."""
+        try:
+            if hasattr(self, 'tab_3d') and self.tab_3d is not None:
+                self.tab_3d.on_colormap_changed(colormap_name)
+        except Exception as e:
+            self.update_status(f"Error changing 3D colormap: {e}")
+
+    def toggle_3d_volume(self, checked):
+        """Delegate 3D volume visibility toggle to Tab3D."""
+        try:
+            if hasattr(self, 'tab_3d') and self.tab_3d is not None:
+                self.tab_3d.toggle_volume(bool(checked))
+        except Exception as e:
+            self.update_status(f"Error toggling 3D volume: {e}")
+
+    def toggle_3d_slice(self, checked):
+        """Delegate 3D slice visibility toggle to Tab3D."""
+        try:
+            if hasattr(self, 'tab_3d') and self.tab_3d is not None:
+                self.tab_3d.toggle_slice(bool(checked))
+        except Exception as e:
+            self.update_status(f"Error toggling 3D slice: {e}")
+
+    def toggle_3d_pointer(self, checked):
+        """Delegate 3D pointer visibility toggle to Tab3D."""
+        try:
+            if hasattr(self, 'tab_3d') and self.tab_3d is not None:
+                self.tab_3d.toggle_pointer(bool(checked))
+        except Exception as e:
+            self.update_status(f"Error toggling 3D pointer: {e}")
+
+    def update_3d_intensity(self):
+        """Delegate 3D intensity update to Tab3D."""
+        try:
+            if hasattr(self, 'tab_3d') and self.tab_3d is not None:
+                self.tab_3d.update_intensity()
+        except Exception as e:
+            self.update_status(f"Error updating 3D intensity: {e}")
+
+    def change_slice_orientation(self, orientation):
+        """Delegate slice orientation change to Tab3D."""
+        try:
+            if hasattr(self, 'tab_3d') and self.tab_3d is not None:
+                self.tab_3d.change_slice_orientation(orientation)
+        except Exception as e:
+            self.update_status(f"Error changing slice orientation: {e}")
+
+    def reset_3d_slice(self):
+        """Delegate resetting 3D slice to Tab3D."""
+        try:
+            if hasattr(self, 'tab_3d') and self.tab_3d is not None:
+                self.tab_3d.reset_slice()
+        except Exception as e:
+            self.update_status(f"Error resetting 3D slice: {e}")
+
+    # === Tree & Context Menu Events ===
+    def _ensure_current_file_from_item(self, item):
+        """Ensure current_file_path is set by walking up the tree to the file root."""
+        try:
+            cur = item
+            while cur is not None:
+                item_type = cur.data(0, Qt.UserRole + 2)
+                if item_type == "file_root":
+                    file_path = cur.data(0, Qt.UserRole + 1)
+                    if file_path:
+                        self.current_file_path = file_path
+                    break
+                cur = cur.parent()
+        except Exception as e:
+            self.update_status(f"Error resolving current file: {e}")
+
+    def on_tree_item_clicked(self, item, column):
+        """
+        Handle tree item single-click events - only show selection info.
+
+        Args:
+            item: QTreeWidgetItem that was clicked
+            column: Column index (not used)
+        """
+        # Ensure we know which file this item belongs to
+        self._ensure_current_file_from_item(item)
+        # Get the full path stored in the item
+        full_path = item.data(0, 32)  # Qt.UserRole = 32
+
+        if full_path:
+            # Update status to show selected item
+            self.update_status(f"Selected: {full_path} (Double-click to load)")
+
+            # Store selected dataset path
+            self.selected_dataset_path = full_path
+
+            # Update file info display with dataset details
+            self.update_file_info_with_dataset(full_path)
+
+            # Show dataset info in the workspace without loading
+            self.show_dataset_info(item)
+        else:
+            # Root item or group without data
+            item_text = item.text(0)
+            self.update_status(f"Selected: {item_text}")
+
+            # Show selection info for non-dataset items
+            if hasattr(self, 'file_status_label'):
+                self.file_status_label.setText(f"Selected: {item_text}")
+            if hasattr(self, 'dataset_info_text'):
+                self.dataset_info_text.setPlainText("Double-click on a dataset to load it into the workspace.")
+
+    def on_tree_item_double_clicked(self, item, column):
+        """
+        Handle tree item double-click events - load dataset into workspace.
+ + Args: + item: QTreeWidgetItem that was double-clicked + column: Column index (not used) + """ + # Get the full path stored in the item + full_path = item.data(0, 32) # Qt.UserRole = 32 + print(f"[DEBUG] Double-clicked item text: {item.text(0)}") + print(f"[DEBUG] Double-clicked item full_path (Qt.UserRole=32): {full_path}") + # Detect file root items to auto-open default dataset + try: + item_type = item.data(0, Qt.UserRole + 2) + except Exception: + item_type = None + + # If a dataset/group path exists, load it as before + if full_path: + # Update status to show loading + self.update_status(f"Loading dataset: {full_path}") + + # Ensure current_file_path points to the owning file + self._ensure_current_file_from_item(item) + # Store selected dataset path + self.selected_dataset_path = full_path + print(f"[DEBUG] selected_dataset_path set: {self.selected_dataset_path}") + + # Load and visualize the dataset + try: + self.start_dataset_load() + print("[DEBUG] visualize_selected_dataset call completed") + except Exception as e: + self.update_status(f"Error in double-click load: {e}") + print(f"[DEBUG] Exception in double-click load: {e}") + else: + # If this is a file root, auto-select and visualize the default 2D dataset + if item_type == "file_root": + print("[DEBUG] Double-clicked file root; attempting to load default dataset '/entry/data/data'") + # Ensure current_file_path points to this file + self._ensure_current_file_from_item(item) + # Set the default dataset path + self.selected_dataset_path = '/entry/data/data' + # Verify the dataset exists; if not, show message + try: + with h5py.File(self.current_file_path, 'r') as h5f: + exists = self.selected_dataset_path in h5f + print(f"[DEBUG] Default dataset exists? {exists}") + if not exists: + self.update_status("Default dataset '/entry/data/data' not found in file") + return + except Exception as e: + self.update_status(f"Error verifying default dataset: {e}") + return + # Visualize using existing logic (will use HDF5Loader for image data) + try: + self.visualize_selected_dataset() + print("[DEBUG] Default dataset visualization completed") + except Exception as e: + self.update_status(f"Error loading default dataset: {e}") + else: + # Non-dataset/group: toggle expand/collapse + print("[DEBUG] Double-clicked a non-dataset (root/group). Toggling expand/collapse.") + if item.isExpanded(): + item.setExpanded(False) + else: + item.setExpanded(True) + + def show_context_menu(self, position): + """ + Show context menu for tree items. 
+ + Args: + position: Position where the context menu was requested + """ + item = self.tree_data.itemAt(position) + if not item: + return + + # Check the item type + item_type = item.data(0, Qt.UserRole + 2) + + if item_type == "file_root": + # Create context menu for file root + menu = QMenu(self) + + # Add collapse/expand options + if item.isExpanded(): + collapse_action = QAction("Collapse", self) + collapse_action.triggered.connect(lambda: self.collapse_item(item)) + menu.addAction(collapse_action) + else: + expand_action = QAction("Expand", self) + expand_action.triggered.connect(lambda: self.expand_item(item)) + menu.addAction(expand_action) + + menu.addSeparator() + + remove_action = QAction("Remove File", self) + remove_action.triggered.connect(lambda: self.remove_file(item)) + menu.addAction(remove_action) + + # Show menu at the requested position + menu.exec_(self.tree_data.mapToGlobal(position)) + + elif item_type == "folder_section": + # Create context menu for folder section + menu = QMenu(self) + + # Add collapse/expand options + if item.isExpanded(): + collapse_action = QAction("Collapse Folder", self) + collapse_action.triggered.connect(lambda: self.collapse_item(item)) + menu.addAction(collapse_action) + else: + expand_action = QAction("Expand Folder", self) + expand_action.triggered.connect(lambda: self.expand_item(item)) + menu.addAction(expand_action) + + menu.addSeparator() + + # Add option to collapse/expand all files in folder + collapse_all_files_action = QAction("Collapse All Files", self) + collapse_all_files_action.triggered.connect(lambda: self.collapse_all_files_in_folder(item)) + menu.addAction(collapse_all_files_action) + + expand_all_files_action = QAction("Expand All Files", self) + expand_all_files_action.triggered.connect(lambda: self.expand_all_files_in_folder(item)) + menu.addAction(expand_all_files_action) + + menu.addSeparator() + + remove_folder_action = QAction("Remove Folder", self) + remove_folder_action.triggered.connect(lambda: self.remove_folder_section(item)) + menu.addAction(remove_folder_action) + + # Show menu at the requested position + menu.exec_(self.tree_data.mapToGlobal(position)) + + def remove_file(self, item): + """ + Remove a file from the tree. + + Args: + item: QTreeWidgetItem representing the file root to remove + """ + file_path = item.data(0, Qt.UserRole + 1) + file_name = item.text(0) + + # Confirm removal + reply = QMessageBox.question( + self, + "Remove File", + f"Are you sure you want to remove '{file_name}' from the tree?", + QMessageBox.Yes | QMessageBox.No, + QMessageBox.No + ) + + if reply == QMessageBox.Yes: + # Remove the item from the tree + root = self.tree_data.invisibleRootItem() + root.removeChild(item) + + self.update_status(f"Removed file: {file_name}") + + # Update analysis placeholder if no files remain + if self.tree_data.topLevelItemCount() == 0: + if hasattr(self, 'label_analysis_placeholder'): + self.label_analysis_placeholder.setText("Load HDF5 data to begin analysis") + + def collapse_item(self, item): + """ + Collapse a specific tree item. + + Args: + item: QTreeWidgetItem to collapse + """ + item.setExpanded(False) + self.update_status(f"Collapsed: {item.text(0)}") + + def expand_item(self, item): + """ + Expand a specific tree item. 
+ + Args: + item: QTreeWidgetItem to expand + """ + item.setExpanded(True) + self.update_status(f"Expanded: {item.text(0)}") + + def collapse_all(self): + """Collapse all items in the tree.""" + if hasattr(self, 'tree_data'): + self.tree_data.collapseAll() + self.update_status("Collapsed all tree items") + + def expand_all(self): + """Expand all items in the tree.""" + if hasattr(self, 'tree_data'): + self.tree_data.expandAll() + self.update_status("Expanded all tree items") + + def collapse_all_files_in_folder(self, folder_item): + """ + Collapse all files within a folder section. + + Args: + folder_item: QTreeWidgetItem representing the folder section + """ + for i in range(folder_item.childCount()): + child_item = folder_item.child(i) + child_item.setExpanded(False) + + folder_name = folder_item.text(0) + self.update_status(f"Collapsed all files in folder: {folder_name}") + + def expand_all_files_in_folder(self, folder_item): + """ + Expand all files within a folder section. + + Args: + folder_item: QTreeWidgetItem representing the folder section + """ + for i in range(folder_item.childCount()): + child_item = folder_item.child(i) + child_item.setExpanded(True) + + folder_name = folder_item.text(0) + self.update_status(f"Expanded all files in folder: {folder_name}") + + def remove_folder_section(self, folder_item): + """ + Remove an entire folder section from the tree. + + Args: + folder_item: QTreeWidgetItem representing the folder section to remove + """ + folder_path = folder_item.data(0, Qt.UserRole + 1) + folder_name = folder_item.text(0) + + # Confirm removal + reply = QMessageBox.question( + self, + "Remove Folder", + f"Are you sure you want to remove the entire folder section '{folder_name}' from the tree?\n\nThis will remove all files in this folder from the tree.", + QMessageBox.Yes | QMessageBox.No, + QMessageBox.No + ) + + if reply == QMessageBox.Yes: + # Remove the folder section from the tree + root = self.tree_data.invisibleRootItem() + root.removeChild(folder_item) + + self.update_status(f"Removed folder section: {folder_name}") + + # Update analysis placeholder if no files remain + if self.tree_data.topLevelItemCount() == 0: + if hasattr(self, 'file_status_label'): + self.file_status_label.setText("No HDF5 file loaded") + if hasattr(self, 'dataset_info_text'): + self.dataset_info_text.setPlainText("Load HDF5 data to begin analysis") + + def show_dataset_info(self, item): + """ + Show information about the selected dataset without loading it. 
+ + Args: + item: QTreeWidgetItem representing the dataset + """ + try: + full_path = item.data(0, 32) + if not full_path or not self.current_file_path: + return + + # Get dataset information + with h5py.File(self.current_file_path, 'r') as h5file: + if full_path in h5file: + dataset = h5file[full_path] + + if isinstance(dataset, h5py.Dataset): + # Show dataset information + shape_str = f"{dataset.shape}" if dataset.shape else "scalar" + dtype_str = str(dataset.dtype) + size_str = f"{dataset.size:,}" if dataset.size > 0 else "0" + + info_text = (f"Dataset: {full_path}\n" + f"Shape: {shape_str}\n" + f"Data Type: {dtype_str}\n" + f"Size: {size_str} elements\n\n" + f"Double-click to load into workspace") + + # Update 2D display + if hasattr(self, 'dataset_info_text'): + self.dataset_info_text.setPlainText(info_text) + else: + # It's a group + group_info = f"Group: {full_path}\n\nContains {len(dataset)} items\n\nDouble-click to expand/collapse" + if hasattr(self, 'dataset_info_text'): + self.dataset_info_text.setPlainText(group_info) + + except Exception as e: + error_msg = f"Error reading dataset info: {str(e)}" + if hasattr(self, 'dataset_info_text'): + self.dataset_info_text.setPlainText(error_msg) + + # === Frame Navigation === + @pyqtSlot() + def previous_frame(self): + """Navigate to the previous frame.""" + try: + if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled(): + current_frame = self.frame_spinbox.value() + if current_frame > 0: + self.frame_spinbox.setValue(current_frame - 1) + except Exception as e: + self.update_status(f"Error navigating to previous frame: {e}") + + @pyqtSlot() + def next_frame(self): + """Navigate to the next frame.""" + try: + if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled(): + current_frame = self.frame_spinbox.value() + max_frame = self.frame_spinbox.maximum() + if current_frame < max_frame: + self.frame_spinbox.setValue(current_frame + 1) + except Exception as e: + self.update_status(f"Error navigating to next frame: {e}") + + @pyqtSlot(int) + def on_frame_spinbox_changed(self, frame_index): + """Handle frame spinbox changes for 3D data navigation.""" + try: + if not hasattr(self, 'current_2d_data') or self.current_2d_data is None: + return + + if self.current_2d_data.ndim != 3: + return + + # Get the selected frame + if frame_index < 0 or frame_index >= self.current_2d_data.shape[0]: + frame_index = 0 + + frame_data = np.asarray(self.current_2d_data[frame_index], dtype=np.float32) + + # Update the image view with the new frame + auto_levels = hasattr(self, 'cbAutoLevels') and self.cbAutoLevels.isChecked() + self.image_view.setImage( + frame_data, + autoLevels=auto_levels, + autoRange=False, # Don't auto-range when changing frames + autoHistogramRange=auto_levels + ) + + # Update frame info label and button states + num_frames = self.current_2d_data.shape[0] + print(f"[FRAME] on_frame_spinbox_changed: frame_index={frame_index}, num_frames={num_frames}") + height, width = frame_data.shape + if hasattr(self, 'frame_info_label'): + self.frame_info_label.setText(f"Image Dimensions: {width}x{height} pixels (frame {frame_index} of {num_frames})") + # Update overlay text + self.update_overlay_text(width, height, f"Frame {frame_index} of {num_frames}") + + # Update hover tooltip/crosshair at last position during playback + try: + xy = getattr(self, '_last_hover_xy', None) + if xy and bool(getattr(self, '_hover_enabled', True)): + self._update_hover_text_at(int(xy[0]), int(xy[1])) + except Exception: + pass + + # Update HKL 3D 
plot if open + try: + self._update_hkl3d_plot_for_current_frame() + except Exception: + pass + + # Update button states + if hasattr(self, 'btn_prev_frame'): + self.btn_prev_frame.setEnabled(frame_index > 0) + if hasattr(self, 'btn_next_frame'): + self.btn_next_frame.setEnabled(frame_index < num_frames - 1) + + # Refresh ROI stats when frame changes + try: + self.roi_manager.update_all_roi_stats() + except Exception: + pass + try: + if hasattr(self, 'info_2d_dock') and self.info_2d_dock is not None: + self.info_2d_dock.refresh() + except Exception: + pass + + except Exception as e: + self.update_status(f"Error changing frame: {e}") + + def start_playback(self): + """Start frame playback if a 3D stack is loaded and controls are enabled.""" + try: + if not hasattr(self, 'current_2d_data') or self.current_2d_data is None: + return + if self.current_2d_data.ndim != 3: + return + # Only play if more than 1 frame + num_frames = self.current_2d_data.shape[0] + if num_frames <= 1: + print(f"[PLAYBACK] start_playback: num_frames={num_frames} -> not enough frames to play") + return + # Set timer interval from FPS + fps = 2 + try: + if hasattr(self, 'sb_fps'): + fps = max(1, int(self.sb_fps.value())) + except Exception: + fps = 2 + interval_ms = int(1000 / max(1, fps)) + print(f"[PLAYBACK] start_playback: num_frames={num_frames}, fps={fps}, interval_ms={interval_ms}") + # Reset frame index to 0 at playback start to avoid stale index from previous data + try: + if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled(): + self.frame_spinbox.setValue(0) + except Exception: + pass + if hasattr(self, 'play_timer') and self.play_timer is not None: + try: + self.play_timer.setInterval(interval_ms) + self.play_timer.start() + try: + print(f"[PLAYBACK] timer state: {'active' if self.play_timer.isActive() else 'inactive'}") + except Exception: + pass + except Exception as e: + print(f"[PLAYBACK] ERROR starting timer: {e}") + # Update control states + try: + self.btn_play.setEnabled(False) + self.btn_pause.setEnabled(True) + except Exception: + pass + self.update_status("Playback started") + except Exception as e: + self.update_status(f"Error starting playback: {e}") + + def pause_playback(self): + """Pause frame playback.""" + try: + if hasattr(self, 'play_timer') and self.play_timer is not None: + try: + self.play_timer.stop() + except Exception: + pass + try: + self.btn_play.setEnabled(True) + self.btn_pause.setEnabled(False) + except Exception: + pass + self.update_status("Playback paused") + except Exception as e: + self.update_status(f"Error pausing playback: {e}") + + def on_fps_changed(self, value): + """Update timer interval when FPS changes.""" + try: + fps = max(1, int(value)) + interval_ms = int(1000 / fps) + if hasattr(self, 'play_timer') and self.play_timer is not None: + try: + self.play_timer.setInterval(interval_ms) + except Exception: + pass + except Exception as e: + self.update_status(f"Error updating FPS: {e}") + + def _advance_frame_playback(self): + """Advance one frame; handle auto replay at end.""" + try: + if not hasattr(self, 'current_2d_data') or self.current_2d_data is None: + return + if self.current_2d_data.ndim != 3: + return + num_frames = self.current_2d_data.shape[0] + if num_frames <= 1: + print("[PLAYBACK] tick: num_frames<=1 -> pausing") + self.pause_playback() + return + idx = 0 + if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled(): + try: + idx = int(self.frame_spinbox.value()) + except Exception: + idx = 0 + # Clamp idx to valid range for 
current data + if idx < 0 or idx >= num_frames: + idx = 0 + next_idx = idx + 1 + if next_idx >= num_frames: + # Auto replay from beginning if checked + auto = False + try: + auto = bool(self.cb_auto_replay.isChecked()) if hasattr(self, 'cb_auto_replay') else False + except Exception: + auto = False + print(f"[PLAYBACK] tick: idx={idx}, next_idx={next_idx} reached end, auto_replay={auto}") + if auto: + next_idx = 0 + else: + self.pause_playback() + return + print(f"[PLAYBACK] tick: advancing to next_idx={next_idx} of num_frames={num_frames}") + # Set via spinbox to reuse existing update logic + if hasattr(self, 'frame_spinbox'): + try: + self.frame_spinbox.setValue(next_idx) + except Exception as e: + print(f"[PLAYBACK] ERROR setting frame_spinbox: {e}") + except Exception: + pass + + # === File Info Display === + def update_file_info_display(self, file_path, additional_info=None): + """ + Update the file information display with details about the current file. + + Args: + file_path (str): Path to the current file or status message + additional_info (dict): Additional information to display + """ + if not hasattr(self, 'file_info_text'): + return + + try: + if file_path == "No file loaded" or not os.path.exists(file_path): + # Show default message + self.file_info_text.setPlainText("Load an HDF5 file to view file information.") + return + + # Get file information + file_stats = os.stat(file_path) + file_size = file_stats.st_size + file_modified = os.path.getmtime(file_path) + + # Format file size + if file_size < 1024: + size_str = f"{file_size} bytes" + elif file_size < 1024 * 1024: + size_str = f"{file_size / 1024:.1f} KB" + elif file_size < 1024 * 1024 * 1024: + size_str = f"{file_size / (1024 * 1024):.1f} MB" + else: + size_str = f"{file_size / (1024 * 1024 * 1024):.1f} GB" + + # Format modification time + import datetime + mod_time = datetime.datetime.fromtimestamp(file_modified).strftime("%Y-%m-%d %H:%M:%S") + + # Get HDF5 file information + info_lines = [] + info_lines.append(f"File: {os.path.basename(file_path)}") + info_lines.append(f"Path: {os.path.dirname(file_path)}") + info_lines.append(f"Size: {size_str}") + info_lines.append(f"Modified: {mod_time}") + + try: + with h5py.File(file_path, 'r') as h5file: + # Count groups and datasets + def count_items(group, counts): + for key in group.keys(): + item = group[key] + if isinstance(item, h5py.Group): + counts['groups'] += 1 + count_items(item, counts) + elif isinstance(item, h5py.Dataset): + counts['datasets'] += 1 + + counts = {'groups': 0, 'datasets': 0} + count_items(h5file, counts) + + info_lines.append("") + info_lines.append("HDF5 Structure:") + info_lines.append(f"Groups: {counts['groups']}") + info_lines.append(f"Datasets: {counts['datasets']}") + + # Get HDF5 attributes if any + if h5file.attrs: + info_lines.append(f"File Attributes: {len(h5file.attrs)}") + + except Exception as e: + info_lines.append("") + info_lines.append(f"HDF5 Error: {str(e)}") + + # Add additional info if provided + if additional_info: + info_lines.append("") + info_lines.append("Selection Details:") + for key, value in additional_info.items(): + info_lines.append(f"{key}: {value}") + + # Update the file info tab + self.file_info_text.setPlainText("\n".join(info_lines)) + + except Exception as e: + error_text = f"Error reading file information: {str(e)}" + self.file_info_text.setPlainText(error_text) + + def update_file_info_with_dataset(self, dataset_path): + """ + Update the file information display with details about the selected dataset. 
+ + Args: + dataset_path (str): Path to the selected dataset within the HDF5 file + """ + if not self.current_file_path or not dataset_path: + return + + try: + with h5py.File(self.current_file_path, 'r') as h5file: + if dataset_path in h5file: + item = h5file[dataset_path] + + additional_info = {} + + if isinstance(item, h5py.Dataset): + # Dataset information + additional_info['Selected Dataset'] = dataset_path + additional_info['Shape'] = str(item.shape) if item.shape else "scalar" + additional_info['Data Type'] = str(item.dtype) + additional_info['Size'] = f"{item.size:,} elements" if item.size > 0 else "0 elements" + + # Memory size estimation + if item.size > 0: + mem_size = item.size * item.dtype.itemsize + if mem_size < 1024: + mem_str = f"{mem_size} bytes" + elif mem_size < 1024 * 1024: + mem_str = f"{mem_size / 1024:.1f} KB" + elif mem_size < 1024 * 1024 * 1024: + mem_str = f"{mem_size / (1024 * 1024):.1f} MB" + else: + mem_str = f"{mem_size / (1024 * 1024 * 1024):.1f} GB" + additional_info['Memory Size'] = mem_str + + # Dataset attributes + if item.attrs: + additional_info['Dataset Attributes'] = f"{len(item.attrs)} attributes" + + # Compression info + if item.compression: + additional_info['Compression'] = item.compression + if item.compression_opts: + additional_info['Compression Level'] = str(item.compression_opts) + + elif isinstance(item, h5py.Group): + # Group information + additional_info['Selected Group'] = dataset_path + additional_info['Contains'] = f"{len(item)} items" + + # Count subgroups and datasets + subgroups = sum(1 for key in item.keys() if isinstance(item[key], h5py.Group)) + subdatasets = sum(1 for key in item.keys() if isinstance(item[key], h5py.Dataset)) + + if subgroups > 0: + additional_info['Subgroups'] = str(subgroups) + if subdatasets > 0: + additional_info['Subdatasets'] = str(subdatasets) + + # Group attributes + if item.attrs: + additional_info['Group Attributes'] = f"{len(item.attrs)} attributes" + + # Update the display with additional dataset/group info + self.update_file_info_display(self.current_file_path, additional_info) + + except Exception as e: + # Fall back to basic file info if dataset reading fails + self.update_file_info_display(self.current_file_path, + {'Error': f"Could not read dataset info: {str(e)}"}) + + # === Speckle Analysis & ROI === + def on_log_scale_toggled(self, checked): + """Handle log scale checkbox toggle.""" + try: + if hasattr(self, 'image_view') and hasattr(self, 'current_2d_data') and self.current_2d_data is not None: + # Get current frame index + current_frame = 0 + if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled(): + current_frame = self.frame_spinbox.value() + + # Get the current frame data + if self.current_2d_data.ndim == 3: + frame_data = self.current_2d_data[current_frame] + else: + frame_data = self.current_2d_data + + # Apply or remove log scale + if checked: + # Apply log scale (log1p to handle zeros) + display_data = np.log1p(np.maximum(frame_data, 0)) + else: + # Use original data + display_data = frame_data + + # Update the image view + auto_levels = hasattr(self, 'cbAutoLevels') and self.cbAutoLevels.isChecked() + self.image_view.setImage( + display_data, + autoLevels=auto_levels, + autoRange=False, + autoHistogramRange=auto_levels + ) + + # Apply current colormap + if hasattr(self, 'cbColorMapSelect_2d'): + current_colormap = self.cbColorMapSelect_2d.currentText() + self.apply_colormap(current_colormap) + + # Update vmin/vmax controls for log scale + 
self.update_vmin_vmax_for_log_scale(frame_data, checked) + + # Refresh ROI stats to reflect displayed image + try: + self.roi_manager.update_all_roi_stats() + except Exception: + pass + + self.update_status(f"Log scale {'enabled' if checked else 'disabled'}") + else: + print("No image data available for log scale") + except Exception as e: + self.update_status(f"Error toggling log scale: {e}") + + def update_vmin_vmax_for_log_scale(self, data, log_scale_enabled): + """Update vmin/vmax controls based on log scale state.""" + try: + if log_scale_enabled: + # For log scale, set reasonable ranges + min_val = max(1, int(np.min(data[data > 0]))) if np.any(data > 0) else 1 + max_val = int(np.max(data)) + + if hasattr(self, 'sbVmin'): + self.sbVmin.setRange(1, max_val) + self.sbVmin.setValue(min_val) + + if hasattr(self, 'sbVmax'): + self.sbVmax.setRange(min_val + 1, max_val * 2) + self.sbVmax.setValue(max_val) + else: + # For linear scale, use full data range + min_val = int(np.min(data)) + max_val = int(np.max(data)) + + if hasattr(self, 'sbVmin'): + self.sbVmin.setRange(min_val, max_val) + self.sbVmin.setValue(min_val) + + if hasattr(self, 'sbVmax'): + self.sbVmax.setRange(min_val + 1, max_val * 2) + self.sbVmax.setValue(max_val) + except Exception as e: + self.update_status(f"Error updating vmin/vmax controls: {e}") + + def on_vmin_changed(self, value): + """Handle vmin spinbox value change.""" + try: + if hasattr(self, 'image_view') and hasattr(self, 'current_2d_data') and self.current_2d_data is not None: + # Get current vmax + vmax = self.sbVmax.value() if hasattr(self, 'sbVmax') else 100 + + # Ensure vmin < vmax + if value >= vmax: + return + + # Apply log scale if enabled + if hasattr(self, 'cbLogScale') and self.cbLogScale.isChecked(): + vmin_display = np.log1p(value) + vmax_display = np.log1p(vmax) + else: + vmin_display = value + vmax_display = vmax + + # Update image levels + self.image_view.setLevels(min=vmin_display, max=vmax_display) + # Refresh ROI stats (based on displayed image) + try: + self.roi_manager.update_all_roi_stats() + except Exception: + pass + self.update_status(f"Vmin set to: {value}") + except Exception as e: + self.update_status(f"Error changing vmin: {e}") + + def on_vmax_changed(self, value): + """Handle vmax spinbox value change.""" + try: + if hasattr(self, 'image_view') and hasattr(self, 'current_2d_data') and self.current_2d_data is not None: + # Get current vmin + vmin = self.sbVmin.value() if hasattr(self, 'sbVmin') else 0 + + # Ensure vmax > vmin + if value <= vmin: + return + + # Apply log scale if enabled + if hasattr(self, 'cbLogScale') and self.cbLogScale.isChecked(): + vmin_display = np.log1p(vmin) + vmax_display = np.log1p(value) + else: + vmin_display = vmin + vmax_display = value + + # Update image levels + self.image_view.setLevels(min=vmin_display, max=vmax_display) + # Refresh ROI stats (based on displayed image) + try: + self.roi_manager.update_all_roi_stats() + except Exception: + pass + self.update_status(f"Vmax set to: {value}") + except Exception as e: + self.update_status(f"Error changing vmax: {e}") + + def on_draw_roi_clicked(self): + """Handle Draw ROI button click (delegated to ROIManager).""" + try: + self.roi_manager.create_and_add_roi() + except Exception as e: + self.update_status(f"Error drawing ROI: {e}") + + def on_ref_frame_changed(self, value): + """Handle reference frame spinbox value change.""" + try: + # Update the current frame to match reference frame + if hasattr(self, 'frame_spinbox') and 
self.frame_spinbox.isEnabled(): + self.frame_spinbox.setValue(value) + self.update_status(f"Reference frame set to: {value}") + except Exception as e: + self.update_status(f"Error changing reference frame: {e}") + + def on_other_frame_changed(self, value): + """Handle other frame spinbox value change.""" + try: + self.update_status(f"Other frame set to: {value}") + except Exception as e: + self.update_status(f"Error changing other frame: {e}") + + + + + + + + # === Control Updates === + def update_frame_controls_for_2d_data(self): + """Update frame controls for 2D data (disable frame navigation).""" + try: + if hasattr(self, 'frame_spinbox'): + self.frame_spinbox.setEnabled(False) + self.frame_spinbox.setValue(0) + self.frame_spinbox.setMaximum(0) + + if hasattr(self, 'btn_prev_frame'): + self.btn_prev_frame.setEnabled(False) + if hasattr(self, 'btn_next_frame'): + self.btn_next_frame.setEnabled(False) + + # Stop playback timer and disable controls + try: + if hasattr(self, 'play_timer') and self.play_timer is not None: + self.play_timer.stop() + if hasattr(self, 'btn_play'): + self.btn_play.setEnabled(False) + if hasattr(self, 'btn_pause'): + self.btn_pause.setEnabled(False) + if hasattr(self, 'sb_fps'): + self.sb_fps.setEnabled(False) + if hasattr(self, 'cb_auto_replay'): + self.cb_auto_replay.setEnabled(False) + except Exception: + pass + except Exception as e: + self.update_status(f"Error updating frame controls for 2D data: {e}") + + def update_frame_controls_for_3d_data(self, num_frames): + """Update frame controls for 3D data (enable frame navigation).""" + try: + if hasattr(self, 'frame_spinbox'): + self.frame_spinbox.setEnabled(True) + self.frame_spinbox.setMaximum(num_frames - 1) + self.frame_spinbox.setValue(0) + + if hasattr(self, 'btn_prev_frame'): + self.btn_prev_frame.setEnabled(False) # Disabled for frame 0 + if hasattr(self, 'btn_next_frame'): + self.btn_next_frame.setEnabled(num_frames > 1) + + # Enable or disable playback controls based on frame count + try: + enable_playback = num_frames > 1 + if hasattr(self, 'btn_play'): + self.btn_play.setEnabled(enable_playback) + if hasattr(self, 'btn_pause'): + self.btn_pause.setEnabled(False) # initially paused + if hasattr(self, 'sb_fps'): + self.sb_fps.setEnabled(enable_playback) + if hasattr(self, 'cb_auto_replay'): + self.cb_auto_replay.setEnabled(enable_playback) + try: + # Select auto replay when playback becomes available + self.cb_auto_replay.setChecked(True) + except Exception: + pass + # Stop timer on reconfigure + if hasattr(self, 'play_timer') and self.play_timer is not None: + self.play_timer.stop() + except Exception: + pass + except Exception as e: + self.update_status(f"Error updating frame controls for 3D data: {e}") + + def update_speckle_controls_for_data(self, data): + """Update speckle analysis controls based on loaded data.""" + try: + if data.ndim == 3: + # 3D data - enable frame selection + max_frame = data.shape[0] - 1 + + if hasattr(self, 'sbRefFrame'): + self.sbRefFrame.setMaximum(max_frame) + self.sbRefFrame.setValue(0) + self.sbRefFrame.setEnabled(True) + + if hasattr(self, 'sbOtherFrame'): + self.sbOtherFrame.setMaximum(max_frame) + self.sbOtherFrame.setValue(min(1, max_frame)) + self.sbOtherFrame.setEnabled(True) + else: + # 2D data - disable frame selection + if hasattr(self, 'sbRefFrame'): + self.sbRefFrame.setValue(0) + self.sbRefFrame.setMaximum(0) + self.sbRefFrame.setEnabled(False) + + if hasattr(self, 'sbOtherFrame'): + self.sbOtherFrame.setValue(0) + self.sbOtherFrame.setMaximum(0) + 
self.sbOtherFrame.setEnabled(False) + + except Exception as e: + self.update_status(f"Error updating speckle controls: {e}") + + def update_vmin_vmax_controls_for_data(self, data): + """Update vmin/vmax controls based on data range.""" + try: + min_val = int(np.min(data)) + max_val = int(np.max(data)) + + if hasattr(self, 'sbVmin'): + self.sbVmin.setRange(min_val, max_val) + self.sbVmin.setValue(min_val) + + if hasattr(self, 'sbVmax'): + self.sbVmax.setRange(min_val + 1, max_val * 2) + self.sbVmax.setValue(max_val) + + except Exception as e: + self.update_status(f"Error updating vmin/vmax controls: {e}") + + # === ROI Stats & Context Menu Actions === + def get_current_frame_data(self): + try: + if not hasattr(self, 'current_2d_data') or self.current_2d_data is None: + return None + if self.current_2d_data.ndim == 3: + frame_index = 0 + if hasattr(self, 'frame_spinbox') and self.frame_spinbox.isEnabled(): + frame_index = self.frame_spinbox.value() + if frame_index < 0 or frame_index >= self.current_2d_data.shape[0]: + frame_index = 0 + return np.asarray(self.current_2d_data[frame_index], dtype=np.float32) + else: + return np.asarray(self.current_2d_data, dtype=np.float32) + except Exception: + return None + + def compute_roi_stats(self, frame_data, roi): + try: + if frame_data is None or roi is None: + return None + height, width = frame_data.shape + pos = roi.pos(); size = roi.size() + x0 = max(0, int(pos.x())); y0 = max(0, int(pos.y())) + w = max(1, int(size.x())); h = max(1, int(size.y())) + x1 = min(width, x0 + w); y1 = min(height, y0 + h) + if x0 >= x1 or y0 >= y1: + return None + sub = frame_data[y0:y1, x0:x1] + stats = { + 'x': x0, 'y': y0, 'w': x1 - x0, 'h': y1 - y0, + 'sum': float(np.sum(sub)), + 'min': float(np.min(sub)), + 'max': float(np.max(sub)), + 'mean': float(np.mean(sub)), + 'std': float(np.std(sub)), + 'count': int(sub.size), + } + return stats + except Exception: + return None + + def show_roi_stats_for_roi(self, roi): + """Delegate to ROIManager.""" + try: + self.roi_manager.show_roi_stats_for_roi(roi) + except Exception as e: + self.update_status(f"Error showing ROI stats: {e}") + + def set_active_roi(self, roi): + """Delegate to ROIManager.""" + try: + self.roi_manager.set_active_roi(roi) + except Exception as e: + self.update_status(f"Error setting active ROI: {e}") + + def delete_roi(self, roi): + """Delegate to ROIManager.""" + try: + self.roi_manager.delete_roi(roi) + except Exception as e: + self.update_status(f"Error deleting ROI: {e}") + + # === File & Dataset Info Helpers === + +# === Entrypoint === +def main(): + """Main entry point for the Workbench application.""" + app = QApplication(sys.argv) + + # Set application properties + app.setApplicationName("Workbench") + app.setApplicationVersion("1.0.0") + app.setOrganizationName("DashPVA") + + # Global excepthook to log unhandled errors to error_output.txt + def _log_excepthook(exctype, value, tb): + try: + import datetime, traceback + error_file = project_root / "error_output.txt" + with open(error_file, "a") as f: + f.write(f"[{datetime.datetime.now().isoformat()}] Unhandled exception: {exctype.__name__}: {value}\n") + traceback.print_tb(tb, file=f) + except Exception: + pass + sys.excepthook = _log_excepthook + + # Create and show the main window + window = WorkbenchWindow() + window.show() + + # Start the event loop + sys.exit(app.exec_()) + +if __name__ == "__main__": + main() diff --git a/viewer/workbench/workers/__init__.py b/viewer/workbench/workers/__init__.py new file mode 100644 index 
0000000..1fb4062
--- /dev/null
+++ b/viewer/workbench/workers/__init__.py
@@ -0,0 +1,182 @@
+from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot
+import numpy as np
+
+class Render3D(QObject):
+    finished = pyqtSignal()
+    render_ready = pyqtSignal(object)
+
+    def __init__(self, *, points=None, intensities=None, num_images=None, shape=None, parent=None):
+        super().__init__(parent)
+        self.points = points
+        self.intensities = intensities
+        self.num_images = int(num_images) if num_images is not None else 0
+        self.shape = tuple(shape) if shape is not None else (0, 0)
+
+    @pyqtSlot()
+    def run(self):
+        try:
+            pts = np.asarray(self.points, dtype=float) if self.points is not None else np.empty((0, 3), dtype=float)
+            ints = np.asarray(self.intensities, dtype=float).ravel() if self.intensities is not None else np.empty((0,), dtype=float)
+
+            if pts.ndim != 2 or (pts.size > 0 and pts.shape[1] != 3):
+                pts = np.empty((0, 3), dtype=float)
+
+            if ints.size:
+                high_mask = ints > 5e6
+                if np.any(high_mask):
+                    ints[high_mask] = 0.0
+
+            self.points = pts
+            self.intensities = ints
+            self.render_ready.emit(self)
+        finally:
+            self.finished.emit()
+
+    def plot_3d_points(self, target_tab):
+        """
+        target_tab: The Tab3D instance.
+        It provides access to self.plotter and the local UI widgets.
+        """
+        try:
+            import pyvista as pyv
+            pts = self.points
+            ints = self.intensities
+
+            # 1. Use the plotter local to the Tab
+            plotter = target_tab.plotter
+
+            if pts.ndim != 2 or pts.shape[1] != 3 or ints.size != pts.shape[0]:
+                from PyQt5.QtWidgets import QMessageBox
+                QMessageBox.warning(target_tab, '3D Viewer', 'Invalid point cloud data.')
+                return
+
+            plotter.clear()
+            plotter.add_axes(xlabel='H', ylabel='K', zlabel='L')
+
+            # --- Setup LUTs ---
+            lut = pyv.LookupTable(cmap='viridis')
+            lut.below_range_color = 'black'
+            lut.above_range_color = (1.0, 1.0, 0.0)
+            lut.below_range_opacity = 0
+            lut.apply_opacity([0, 1])
+            lut.above_range_opacity = 1
+
+            # --- Create Mesh ---
+            mesh = pyv.PolyData(pts)
+            mesh['intensity'] = ints
+
+            # Store references on the Tab instance for intensity updates later
+            target_tab.cloud_mesh_3d = mesh
+            target_tab.lut = lut
+
+            # --- Add to Plotter ---
+            plotter.add_mesh(
+                mesh,
+                scalars='intensity',
+                cmap=lut,
+                point_size=5.0,
+                name='points',
+                show_scalar_bar=True,
+                nan_opacity=0.0,
+                show_edges=False
+            )
+
+            plotter.show_bounds(
+                mesh=mesh,
+                xtitle='H Axis', ytitle='K Axis', ztitle='L Axis',
+                bounds=mesh.bounds,
+            )
+            plotter.reset_camera()
+
+            # -- slice --
+            slice_normal = (0, 0, 1)
+            slice_origin = mesh.center
+
+            target_tab.plane_widget = plotter.add_plane_widget(
+                callback=target_tab.on_plane_update,
+                normal=slice_normal,
+                origin=slice_origin,
+                bounds=mesh.bounds,
+                factor=1.0,
+                implicit=True,
+                assign_to_axis=None,
+                tubing=False,
+                origin_translation=True,
+                outline_opacity=0
+            )
+
+            # --- Update Local Tab UI Widgets ---
+            # Note: We now look for names from tab_3d.ui
+            ints_range = (int(np.min(ints)), int(np.max(ints)))
+
+            if hasattr(target_tab, 'sb_min_intensity_3d'):
+                target_tab.sb_min_intensity_3d.setRange(*ints_range)  # Match spinbox range to the data range
+                target_tab.sb_min_intensity_3d.setValue(int(np.min(ints)))
+
+            if hasattr(target_tab, 'sb_max_intensity_3d'):
+                target_tab.sb_max_intensity_3d.setRange(*ints_range)
+                target_tab.sb_max_intensity_3d.setValue(int(np.max(ints)))
+
+            # Call intensity update logic local to the tab
+            if hasattr(target_tab, 'update_intensity'):
+                target_tab.update_intensity()
+
+            plotter.render()
+
+        except Exception as e:
+            print(f"Error in plot_3d_points: {e}")
+
+class DatasetLoader(QObject):
+    loaded = pyqtSignal(object)  # numpy array
+    failed = pyqtSignal(str)
+
+    def __init__(self, file_path, dataset_path, max_frames=100):
+        super().__init__()
+        self.file_path = file_path
+        self.dataset_path = dataset_path
+        self.max_frames = max_frames
+
+    @pyqtSlot()
+    def run(self):
+        try:
+            import h5py
+            with h5py.File(self.file_path, 'r') as h5file:
+                if self.dataset_path not in h5file:
+                    self.failed.emit("Dataset not found")
+                    return
+                dset = h5file[self.dataset_path]
+                if not isinstance(dset, h5py.Dataset):
+                    self.failed.emit("Selected item is not a dataset")
+                    return
+
+                # Efficient loading to avoid blocking on huge datasets
+                if len(dset.shape) == 3:
+                    max_frames = min(self.max_frames, dset.shape[0])
+                    data = dset[:max_frames]
+                else:
+                    # Guard against extremely large 2D datasets by center cropping
+                    try:
+                        estimated_size = dset.size * dset.dtype.itemsize
+                    except Exception:
+                        estimated_size = 0
+                    if len(dset.shape) == 2 and estimated_size > 512 * 1024 * 1024:  # >512MB
+                        h, w = dset.shape
+                        ch = min(h, 2048)
+                        cw = min(w, 2048)
+                        y0 = max(0, (h - ch) // 2)
+                        x0 = max(0, (w - cw) // 2)
+                        data = dset[y0:y0+ch, x0:x0+cw]
+                    else:
+                        data = dset[...]
+
+            data = np.asarray(data, dtype=np.float32)
+            # Clean high values
+            high_mask = data > 5e6
+            if np.any(high_mask):
+                data[high_mask] = 0
+
+            # 1D handling: emit raw 1D data for dedicated 1D view
+            self.loaded.emit(data)
+        except Exception as e:
+            self.failed.emit(f"Error loading dataset: {e}")
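For reference, `DatasetLoader` follows the standard Qt worker pattern: move the worker to a `QThread`, connect its signals, and start the thread so `run()` executes off the GUI thread. A minimal usage sketch under that assumption; the file path and the `print` slots are placeholders, not part of the repository:

```python
from PyQt5.QtCore import QThread
from viewer.workbench.workers import DatasetLoader

thread = QThread()
loader = DatasetLoader("/path/to/scan.h5", "/entry/data/data", max_frames=100)
loader.moveToThread(thread)

thread.started.connect(loader.run)                    # run() executes in the worker thread
loader.loaded.connect(lambda arr: print(arr.shape))   # placeholder slot for the loaded array
loader.failed.connect(print)                          # placeholder error slot
loader.loaded.connect(thread.quit)                    # tear the thread down when done
loader.failed.connect(thread.quit)

thread.start()
```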