diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5803107..76d8f18 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,8 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.9", "3.10", "3.11"] + # 与 pyproject.toml 中 python = "^3.10" 一致 / Match Poetry python constraint + python-version: ["3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v4 @@ -37,6 +38,19 @@ jobs: - name: 安装依赖 run: poetry install --no-interaction --no-root + - name: 设置 Node.js + uses: actions/setup-node@v4 + with: + node-version: "20" + cache: "npm" + cache-dependency-path: web/analysis-ui/package-lock.json + + - name: 构建星空解算控制台前端 + run: | + cd web/analysis-ui + npm ci + npm run build + - name: 代码格式检查 (Black) run: poetry run black --check ogscope tests @@ -56,6 +70,7 @@ jobs: file: ./coverage.xml flags: unittests name: codecov-umbrella + fail_ci_if_error: false # 无 CODECOV_TOKEN 的 fork 等场景不阻断 CI / Do not fail CI without token lint: runs-on: ubuntu-latest @@ -65,7 +80,7 @@ jobs: - name: 设置 Python uses: actions/setup-python@v4 with: - python-version: "3.9" + python-version: "3.10" - name: 安装 Poetry uses: snok/install-poetry@v1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e68299a..b1bb87f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,7 +14,7 @@ jobs: - name: 设置 Python uses: actions/setup-python@v4 with: - python-version: "3.9" + python-version: "3.10" - name: 安装 Poetry uses: snok/install-poetry@v1 diff --git a/.gitignore b/.gitignore index 17cccc3..94b2093 100644 --- a/.gitignore +++ b/.gitignore @@ -37,6 +37,11 @@ env.bak/ venv.bak/ .python-version +# ==================== +# Node (星图解算实验室前端 / Analysis lab UI) +# ==================== +web/analysis-ui/node_modules/ + # ==================== # Poetry # ==================== @@ -143,6 +148,9 @@ captured_images/ *.fits *.fit +# Tetra3 图案库(体积大,本地或部署时放入 / Large pattern DB; copy on deploy) +data/plate_solve/default_database.npz + # 星表数据(太大,不提交) star_catalogs/*.dat star_catalogs/*.bin diff --git a/CLAUDE.md b/CLAUDE.md index 7be2494..3f7eed5 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -15,6 +15,7 @@ OGScope 是一个基于 Orange Pi Zero 2W 的电子极轴镜系统,用于天 - **日志**: Loguru - **测试**: Pytest - **代码质量**: Black, Ruff, MyPy +- **星空解算控制台前端**: `web/analysis-ui`(Vite + React + Tailwind),构建输出 `web/static/analysis-lab/`;本地执行 `cd web/analysis-ui && npm install && npm run build`;同步到开发板见 `scripts/sync_dev_board.sh`(环境变量 `OGSCOPE_DEV_HOST`、`OGSCOPE_DEV_PATH` 等)。i18n 见 `web/static/i18n/analysis.zh.json` / `analysis.en.json`。 ## 开发环境 diff --git a/data/analysis/presets/official/default_widefield.json b/data/analysis/presets/official/default_widefield.json new file mode 100644 index 0000000..e722d97 --- /dev/null +++ b/data/analysis/presets/official/default_widefield.json @@ -0,0 +1,13 @@ +{ + "id": "default_widefield", + "name": "默认广角 / Default widefield", + "scope": "official", + "params": { + "fov_estimate": 16.0, + "fov_max_error": 5.0, + "solve_timeout_ms": 8000, + "hint_ra_deg": 45.0, + "hint_dec_deg": 80.0 + }, + "created_at": "2026-01-01T00:00:00+00:00" +} diff --git a/data/catalog/README.md b/data/catalog/README.md deleted file mode 100644 index 4d36436..0000000 --- a/data/catalog/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# 星表数据库说明 / Star Catalog Database Notes - -- 主库文件:`data/catalog/stars.db` -- 数据源(默认):HYG Database v3(CSV) -- 用途:提供极轴解算与调试控制台的星点查询、匹配与维护 - -## 字段说明 / Fields - -- `source_id`: 唯一标识 / Unique identifier -- `ra`, `dec`: 赤经赤纬(度)/ Right ascension 
and declination in degrees -- `pmra`, `pmdec`: 自行参数 / Proper motion parameters -- `phot_g_mean_mag`: 亮度星等 / Magnitude -- `name_en`, `name_zh`: 英文/中文名称 -- `description_en`, `description_zh`: 英文/中文描述 - -## 维护方式 / Maintenance - -- 可通过 `/api/catalog/*` 与 `/debug/analysis` 执行下载、索引、CRUD。 -- 数据库文件会随 Git 提交与版本发行。 diff --git a/data/catalog/meta/manifest.json b/data/catalog/meta/manifest.json deleted file mode 100644 index 0f365d0..0000000 --- a/data/catalog/meta/manifest.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "generated_at": "2026-03-27T14:47:15.117645+00:00", - "source_file": "data/catalog/raw/catalog_raw.csv", - "db_path": "data/catalog/stars.db", - "magnitude_limit": 12.0, - "ra_bin_size_deg": 5.0, - "record_count": 117931, - "bucket_count": 72, - "source_sha256": "d9f69fd86bbf90a4e4d52b4c5c53eacfa6dfc0bfdef85bfd94f095e0bebe4ebd", - "epoch": "JNow(approx)", - "status": "ready" -} \ No newline at end of file diff --git a/data/catalog/stars.db b/data/catalog/stars.db deleted file mode 100644 index 34b0c4f..0000000 Binary files a/data/catalog/stars.db and /dev/null differ diff --git a/data/plate_solve/README.md b/data/plate_solve/README.md new file mode 100644 index 0000000..6eeec1b --- /dev/null +++ b/data/plate_solve/README.md @@ -0,0 +1,5 @@ +# Plate solve 数据目录 / Plate solve data directory + +请将 `default_database.npz`(Tetra3 图案库)放在此目录,或通过 `OGSCOPE_SOLVER_TETRA_DATABASE_PATH` 指定绝对路径。 + +详细说明见 [docs/development/plate-solve-data.md](../../docs/development/plate-solve-data.md)。 diff --git a/docs/development/README.md b/docs/development/README.md index 871c3ec..dd98f80 100644 --- a/docs/development/README.md +++ b/docs/development/README.md @@ -9,6 +9,61 @@ 当前推荐流程为:**本地编辑代码 -> 上传到开发板 -> 使用 `systemd` 重启服务验证**。 该流程与实际硬件运行环境一致,适合涉及相机与系统库依赖的场景。 +## 0. 部署速查(爱好者复刻) + +本节与 **§1–§11** 的关系:**只列最常用命令与检查项**;Poetry/PEP 668、镜像选项、卸载与排错原理见后文对应章节。 + +### 0.1 系统要求 + +- 单板:**ARM**(`aarch64` 或 `armhf`),如 Raspberry Pi / Orange Pi +- 系统:**Debian/apt** 系镜像(与 `picamera2`/`libcamera` 文档一致;脚本会读 `/etc/os-release`,见 **§1.4**) +- Python:**3.10+**(以 `pyproject.toml` 为准) +- 网络:首次安装需拉取依赖;浏览器访问 Web 需可达设备 **TCP 8000**(按需防火墙放行) + +### 0.2 首次安装 + +```bash +cd /path/to/OGScope +chmod +x scripts/install.sh +./scripts/install.sh +``` + +说明摘要:默认 `poetry install --only main`;国内网络可 **`export OGSCOPE_MIRROR=cn`**;低配板可 **`OGSCOPE_APT_SLOW=1`**。完整选项见 **§1.4**。安装后:`sudo systemctl start ogscope`。 + +### 0.3 星图解算数据 + +将 **`default_database.npz`** 放到 **`data/plate_solve/`**(不随仓库分发)。放置与配置见 [plate-solve-data.md](plate-solve-data.md)。 + +### 0.4 日常更新 + +```bash +cd /path/to/OGScope +chmod +x scripts/board-update.sh +# 可选:OGSCOPE_GIT_PULL=1 OGSCOPE_MIRROR=cn +./scripts/board-update.sh +``` + +详情见 **§6.2**。 + +### 0.5 卸载与健康检查 + +- 卸载服务与 `.venv`:见 **§6.3**(`scripts/uninstall.sh`) +- 健康检查与日志: + +```bash +curl -s http://127.0.0.1:8000/health +sudo systemctl status ogscope +sudo journalctl -u ogscope -f +``` + +### 0.6 常见故障(简表) + +| 现象 | 处理方向 | +|------|----------| +| `ImportError: picamera2` | 用 `apt` 装相机栈;venv 由 `install.sh` 配置(**§1.2、§3**) | +| PEP 668 / 系统 pip 被拒 | 只用项目 `.venv`,勿在系统 Python 上混装(**§1.2**) | +| 服务无法启动 | 查 `WorkingDirectory`、`ExecStart`、`journalctl`(**§10**) | + ## 1. 
Python 版本与项目依赖 ### 1.1 Python 版本基线 @@ -17,7 +72,13 @@ - 建议开发板使用 Python 3.10 及以上版本 - 若其他文档出现 `3.9+`,应视为历史描述 -### 1.2 安装 Poetry 与 Python 依赖 +### 1.2 Poetry、PEP 668 与虚拟环境(必读) + +- **必须使用 Poetry 创建的项目内虚拟环境**(`.venv`),**禁止**全局设置 `virtualenvs.create false` 后在系统 Python 上混装依赖;否则易触发 **PEP 668**(发行版保护系统 site-packages,`pip`/`poetry` 无法改写系统包)。 +- 开发板推荐由 `scripts/install.sh` 统一写入:`virtualenvs.create true`、`virtualenvs.in-project true`,并尽量启用 **`virtualenvs.options.system-site-packages true`**,使 venv 能解析通过 `apt` 安装的 `picamera2` 等系统包。 +- **生产/板端**默认仅安装运行时依赖:`poetry install --only main`(脚本默认)。若需 pytest、类型检查等,在开发机或板上设置 `OGSCOPE_INSTALL_DEV=1` 后重装。 + +### 1.3 安装 Poetry 与 Python 依赖 ```bash # 进入项目目录 @@ -27,18 +88,26 @@ cd /path/to/OGScope curl -sSL https://install.python-poetry.org | python3 - export PATH="$HOME/.local/bin:$PATH" -# 安装项目依赖 +# 开发机:完整依赖(含 dev) poetry install + +# 开发板(手动维护时):仅运行时依赖,与 install.sh 默认一致 +# poetry install --no-interaction --only main ``` -### 1.3 使用安装脚本(推荐首次部署) +### 1.4 使用安装脚本(推荐首次部署) 仓库提供 `scripts/install.sh`,用于在开发板执行一次性环境准备。脚本会: +- 读取 `/etc/os-release` 识别发行版,**仅支持 Debian/Ubuntu 系**(含 **Raspberry Pi OS**、Orange Pi Debian 等);非该系将退出,避免误改软件源 - 安装系统依赖与 Poetry -- 安装项目 Python 依赖 +- 配置 Poetry 使用项目 `.venv` 与 `system-site-packages`(Poetry 版本支持时) +- 默认执行 `poetry install --only main`(设 `OGSCOPE_INSTALL_DEV=1` 可装 dev) +- 可选 `OGSCOPE_APT_SLOW=1`:分批 `apt` 并在批次间暂停,减轻低配板内存压力 +- **`OGSCOPE_MIRROR`**:`auto`(默认,按 `LANG`/`LC_*` 与系统时区启发)、`cn`(中国大陆镜像:apt 清华源 + PyPI 清华)、`international`(不替换 apt,PyPI 走默认)。在国内但语言为英文时,请显式 `export OGSCOPE_MIRROR=cn`。 +- 创建 `logs`、`uploads`、`data/plate_solve` 等目录 - 生成/更新 `systemd` 服务(`ogscope.service`) -- 注入 `PYTHONPATH` 与 `LD_LIBRARY_PATH` +- 注入 `PYTHONPATH` 与 `LD_LIBRARY_PATH`(按实际存在的路径) - 启用服务开机自启 执行方式: @@ -49,10 +118,10 @@ chmod +x scripts/install.sh ./scripts/install.sh ``` -### 1.4 依赖维护建议 +### 1.5 依赖维护建议 - 保持 `poetry.lock` 与仓库同步 -- 每次上传较大改动后,执行一次 `poetry install` +- 每次上传较大改动后,在板上执行 `./scripts/board-update.sh`,或手动 `poetry install --only main` 后 `sudo systemctl restart ogscope` - 服务运行时优先使用固定虚拟环境解释器(见第 5 节) ## 2. 
系统环境依赖(重点) @@ -82,6 +151,8 @@ sudo apt install -y \ - 这些系统路径默认不一定在 Poetry 虚拟环境的 `sys.path` 中 - 结果是:服务运行于虚拟环境时,可能找不到 `picamera2` 等系统包 +**与 `system-site-packages` 的关系**:启用后,venv 的 `sys.path` 会包含系统 site-packages,一般即可 `import picamera2`;`systemd` 里仍保留 `PYTHONPATH`,用于覆盖不同发行版下 `/usr/local/lib/python3.x/dist-packages` 等路径,二者叠加不冲突。 + 因此在服务配置中显式注入 `PYTHONPATH`,将系统 Python 包路径加入解释器搜索路径,例如: ```ini @@ -107,6 +178,12 @@ Environment=LD_LIBRARY_PATH=/usr/lib/aarch64-linux-gnu - `scripts/install.sh` - 作用:安装依赖并生成 service - 状态:安装辅助脚本,不是运行时自动调用入口 +- `scripts/board-update.sh` + - 作用:已安装环境下的增量更新(可选 `OGSCOPE_GIT_PULL=1` 执行 `git pull`、`poetry install`、重启 `ogscope`) + - 状态:日常部署推荐入口 +- `scripts/uninstall.sh` + - 作用:停止并移除 `ogscope` systemd 单元、可选删除 `.venv`;默认保留 `logs/`、`data/` 等;需确认(交互输入 `YES` 或 `OGSCOPE_UNINSTALL_CONFIRM=1`) + - 状态:卸载辅助脚本;不卸载系统 apt 包与全局 Poetry - `scripts/start_debug_console.sh` - 作用:手动设置 `PYTHONPATH`/`LD_LIBRARY_PATH` 后前台启动 - 状态:手动调试辅助脚本,不是默认生产启动链路 @@ -162,14 +239,24 @@ sudo systemctl status ogscope ### 6.2 日常代码更新(推荐) -代码更新后(`git pull` 或手动上传)执行以下流程: +代码更新后(`git pull` 或手动上传)可一键执行(镜像策略与 `install.sh` 相同,通过 `OGSCOPE_MIRROR` 控制): + +```bash +cd /path/to/OGScope +chmod +x scripts/board-update.sh +# 若需先拉取远端代码(仅 git 仓库):OGSCOPE_GIT_PULL=1 ./scripts/board-update.sh +# 中国大陆:OGSCOPE_MIRROR=cn ./scripts/board-update.sh +./scripts/board-update.sh +``` + +或手动执行: ```bash # 进入项目目录 cd /path/to/OGScope -# 同步依赖(有 pyproject.toml/poetry.lock 变更时必须执行) -poetry install +# 同步依赖(有 pyproject.toml/poetry.lock 变更时必须执行;板端建议仅 main) +poetry install --no-interaction --only main # 重启服务使新代码生效 sudo systemctl restart ogscope @@ -184,6 +271,49 @@ sudo journalctl -u ogscope -f - 若仅前端模板/静态文件变更,通常不需要 `poetry install` - 若服务文件配置有改动,需先 `sudo systemctl daemon-reload` +### 6.3 卸载服务与本地环境(`scripts/uninstall.sh`) + +在需要**移除 systemd 服务**、清理项目内 **`.venv`**,或换目录重装时使用 `scripts/uninstall.sh`。脚本**不会**卸载系统已通过 `apt` 安装的包(如 `python3-picamera2`),也**不会**卸载用户级全局 **Poetry**;仅处理 OGScope 服务单元与项目目录内可选内容。 + +**会执行的操作 / What it does** + +- `systemctl stop` / `disable` `ogscope` +- 删除 `/etc/systemd/system/ogscope.service`(若存在),并 `daemon-reload` +- 默认删除项目根目录下的 **`.venv`**(可用环境变量保留,见下) + +**默认保留 / Kept by default** + +- `logs/`、`uploads/`、`data/`(含 `data/plate_solve` 等);若需一并删除,须显式开启(见下) + +**环境变量 / Environment** + +| 变量 | 含义 | +|------|------| +| `OGSCOPE_UNINSTALL_CONFIRM=1` | **非交互场景必须设置**(如 CI、脚本),否则脚本在非 TTY 下直接退出 | +| `OGSCOPE_UNINSTALL_KEEP_VENV=1` | 保留 `.venv`,不删除虚拟环境 | +| `OGSCOPE_UNINSTALL_REMOVE_DATA=1` | **危险**:删除 `logs/`、`uploads/`、`data/`(含星库等用户数据) | + +**交互确认 / Interactive**:在终端前台运行时,若未设置 `OGSCOPE_UNINSTALL_CONFIRM=1`,需输入全大写 **`YES`** 才会继续。 + +```bash +cd /path/to/OGScope +chmod +x scripts/uninstall.sh + +# 交互:按提示输入 YES +./scripts/uninstall.sh + +# 非交互:确认后执行 +OGSCOPE_UNINSTALL_CONFIRM=1 ./scripts/uninstall.sh + +# 保留虚拟环境,仅移除服务 +OGSCOPE_UNINSTALL_CONFIRM=1 OGSCOPE_UNINSTALL_KEEP_VENV=1 ./scripts/uninstall.sh + +# 同时删除日志与数据目录(慎用) +OGSCOPE_UNINSTALL_CONFIRM=1 OGSCOPE_UNINSTALL_REMOVE_DATA=1 ./scripts/uninstall.sh +``` + +卸载后若需再次部署,重新执行 `./scripts/install.sh` 即可。 + ## 7. PyCharm 远程开发(当前实践) 当前采用的是 **“本地 IDE 编辑 + 手动部署到开发板”** 模式,而不是由 IDE 直接接管远程运行。 @@ -273,11 +403,12 @@ router.include_router(new_router, tags=["NewModule - 新模块"]) - `PYTHONPATH` 是否包含系统 `dist-packages` - `LD_LIBRARY_PATH` 是否包含 `libcamera` 相关库路径 - 最近代码上传是否完整,依赖是否已重新安装 +- **`No module named 'scipy'`**:`board-update.sh` / `install.sh` 会在 `poetry install` 后校验并自动 `--no-cache` 重试与 pip 补装;若仍失败,删除 `.venv` 后执行 `OGSCOPE_MIRROR=cn ./scripts/board-update.sh`(或重装 `./scripts/install.sh`) ## 11. 
常用命令速查 ```bash -# 安装/更新依赖 +# 安装/更新依赖(开发机);板端可用 ./scripts/board-update.sh poetry install # 前台手动启动(调试时) @@ -287,4 +418,8 @@ poetry run python -m ogscope.main sudo systemctl restart ogscope sudo systemctl status ogscope sudo journalctl -u ogscope -f + +# 卸载服务与 .venv(详见 §6.3;需确认或 OGSCOPE_UNINSTALL_CONFIRM=1) +# ./scripts/uninstall.sh +# OGSCOPE_UNINSTALL_CONFIRM=1 ./scripts/uninstall.sh ``` diff --git a/docs/development/README_EN.md b/docs/development/README_EN.md index 0f7b63b..8f830fc 100644 --- a/docs/development/README_EN.md +++ b/docs/development/README_EN.md @@ -12,6 +12,61 @@ Recommended workflow: **edit locally -> upload to board -> restart with `systemd` -> verify**. This matches real hardware runtime behavior. +## 0. Quick deployment checklist (hobbyists) + +This section lists **common commands and checks only**. For Poetry/PEP 668, mirror options, uninstall, and troubleshooting details, see **§1–§11** below. + +### 0.1 Requirements + +- Board: **ARM** (`aarch64` or `armhf`), e.g. Raspberry Pi / Orange Pi +- OS: **Debian/apt**-based images (compatible with `picamera2`/`libcamera`; install script reads `/etc/os-release`, see **§1.4**) +- Python: **3.10+** (see `pyproject.toml`) +- Network: first install downloads dependencies; Web UI needs **TCP 8000** reachable (configure firewall as needed) + +### 0.2 First-time install + +```bash +cd /path/to/OGScope +chmod +x scripts/install.sh +./scripts/install.sh +``` + +Summary: default `poetry install --only main`; in mainland China use **`export OGSCOPE_MIRROR=cn`**; low-memory boards: **`OGSCOPE_APT_SLOW=1`**. Full options: **§1.4**. After install: `sudo systemctl start ogscope`. + +### 0.3 Plate-solve data + +Place **`default_database.npz`** under **`data/plate_solve/`** (not shipped in the repo). See [plate-solve-data.md](plate-solve-data.md). + +### 0.4 Routine updates + +```bash +cd /path/to/OGScope +chmod +x scripts/board-update.sh +# optional: OGSCOPE_GIT_PULL=1 OGSCOPE_MIRROR=cn +./scripts/board-update.sh +``` + +Details: **§6.2**. + +### 0.5 Uninstall and health check + +- Remove service and `.venv`: **§6.3** (`scripts/uninstall.sh`) +- Health and logs: + +```bash +curl -s http://127.0.0.1:8000/health +sudo systemctl status ogscope +sudo journalctl -u ogscope -f +``` + +### 0.6 Troubleshooting (short) + +| Symptom | Where to look | +|---------|----------------| +| `ImportError: picamera2` | Install camera stack with `apt`; venv from `install.sh` (**§1.2, §3**) | +| PEP 668 | Use project `.venv` only; do not mix into system Python (**§1.2**) | +| Service fails to start | `WorkingDirectory`, `ExecStart`, `journalctl` (**§10**) | + ## 1. Python Version and Project Dependencies ### 1.1 Python baseline @@ -20,23 +75,45 @@ This matches real hardware runtime behavior. - Recommended board runtime: Python 3.10+ - Any `3.9+` wording in old docs should be treated as historical -### 1.2 Install Poetry and Python packages +### 1.2 Poetry, PEP 668, and the virtual environment (required reading) + +- **You must use a Poetry-managed project venv** (`.venv`). Do **not** set + `virtualenvs.create false` globally and mix packages into the system Python; + that leads to **PEP 668** errors (distribution-managed site-packages cannot be + modified by `pip`/`poetry`). +- On the board, run `scripts/install.sh` to set `virtualenvs.create true`, + `virtualenvs.in-project true`, and preferably + **`virtualenvs.options.system-site-packages true`** so the venv can import + `apt`-installed `picamera2`. 
+- **Production defaults** to runtime-only deps: `poetry install --only main` + (script default). For pytest and dev tools, set `OGSCOPE_INSTALL_DEV=1` on a + dev machine or board and reinstall. + +### 1.3 Install Poetry and Python packages ```bash cd /path/to/OGScope curl -sSL https://install.python-poetry.org | python3 - export PATH="$HOME/.local/bin:$PATH" +# dev machine: full dependency set including dev poetry install +# board (manual): match install.sh default +# poetry install --no-interaction --only main ``` -### 1.3 Install script (recommended for first-time setup) +### 1.4 Install script (recommended for first-time setup) The repository provides `scripts/install.sh`. It performs initial board setup: +- reads `/etc/os-release` and **only supports Debian/Ubuntu family** (including **Raspberry Pi OS**); aborts on other distros for safety - installs system dependencies and Poetry -- installs project dependencies +- configures Poetry for `.venv` and `system-site-packages` (when supported) +- defaults to `poetry install --only main` (set `OGSCOPE_INSTALL_DEV=1` for dev) +- optional `OGSCOPE_APT_SLOW=1`: stagger `apt` and pause between batches on low-memory boards +- **`OGSCOPE_MIRROR`**: `auto` (default, heuristic from `LANG`/`LC_*` and timezone), `cn` (mainland China mirrors for apt + PyPI via Tsinghua), `international` (do not rewrite apt; default PyPI). If you are in China but use `en_US` locale, set `export OGSCOPE_MIRROR=cn`. +- creates `logs`, `uploads`, `data/plate_solve`, etc. - creates/updates `systemd` service (`ogscope.service`) -- injects `PYTHONPATH` and `LD_LIBRARY_PATH` +- injects `PYTHONPATH` and `LD_LIBRARY_PATH` (paths that exist) - enables service autostart Run: @@ -47,10 +124,11 @@ chmod +x scripts/install.sh ./scripts/install.sh ``` -### 1.4 Dependency maintenance +### 1.5 Dependency maintenance - keep `poetry.lock` in sync with the repository -- run `poetry install` after significant code updates +- after updates on the board, run `./scripts/board-update.sh`, or + `poetry install --only main` then `sudo systemctl restart ogscope` - prefer a fixed venv Python in service startup (see section 5) ## 2. System Dependencies (Important) @@ -84,6 +162,11 @@ This is a key runtime detail for this project. Result: service may fail to import packages such as `picamera2`. +**Relationship to `system-site-packages`**: when enabled, the venv `sys.path` +includes system site-packages, which is usually enough to `import picamera2`. +`PYTHONPATH` in `systemd` still covers distro-specific paths such as +`/usr/local/lib/python3.x/dist-packages`; both layers work together. 
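+
+A minimal check of both layers (a sketch, assuming the in-project `.venv` created by `scripts/install.sh` and the `ogscope` unit name used elsewhere in this guide):
+
+```bash
+# From the project root: confirm the venv interpreter resolves the apt-installed camera stack
+poetry run python -c "import picamera2; print(picamera2.__file__)"
+
+# Confirm the environment (PYTHONPATH / LD_LIBRARY_PATH) the systemd unit passes to the service
+systemctl show ogscope -p Environment
+```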
+ So the service explicitly injects `PYTHONPATH`, for example: ```ini @@ -109,6 +192,15 @@ Environment=LD_LIBRARY_PATH=/usr/lib/aarch64-linux-gnu - `scripts/install.sh` - purpose: setup/install and create service - status: installer utility, not a runtime auto-invoked entrypoint +- `scripts/board-update.sh` + - purpose: incremental update after install (optional `OGSCOPE_GIT_PULL=1` for + `git pull`, `poetry install`, restart `ogscope`) + - status: recommended for routine deployment +- `scripts/uninstall.sh` + - purpose: stop and remove `ogscope` systemd unit, optionally remove `.venv`; + keeps `logs/`, `data/` by default; requires confirmation (`YES` or + `OGSCOPE_UNINSTALL_CONFIRM=1`) + - status: uninstall helper; does not remove apt packages or global Poetry - `scripts/start_debug_console.sh` - purpose: foreground run with `PYTHONPATH`/`LD_LIBRARY_PATH` - status: manual debug helper, not default production startup @@ -164,11 +256,20 @@ sudo systemctl status ogscope ### 6.2 Daily update flow -After code updates (`git pull` or manual upload), run: +After code updates (`git pull` or manual upload), you can run: ```bash cd /path/to/OGScope -poetry install +chmod +x scripts/board-update.sh +# with git and need pull: OGSCOPE_GIT_PULL=1 ./scripts/board-update.sh +./scripts/board-update.sh +``` + +Or manually: + +```bash +cd /path/to/OGScope +poetry install --no-interaction --only main sudo systemctl restart ogscope sudo systemctl status ogscope sudo journalctl -u ogscope -f @@ -179,6 +280,49 @@ Notes: - if only templates/static files changed, `poetry install` is usually not needed - if service file changed, run `sudo systemctl daemon-reload` first +### 6.3 Uninstall service and local environment (`scripts/uninstall.sh`) + +Use `scripts/uninstall.sh` when you need to **remove the systemd unit**, delete the project **`.venv`**, or clean up before reinstalling in another directory. The script **does not** remove packages installed with `apt` (e.g. `python3-picamera2`) or the user-level **Poetry** installation; it only manages the OGScope service file and optional content under the project tree. + +**What it does** + +- `systemctl stop` / `disable` `ogscope` +- removes `/etc/systemd/system/ogscope.service` if present, then `daemon-reload` +- by default removes `.venv` at the project root (can be kept; see below) + +**Kept by default** + +- `logs/`, `uploads/`, `data/` (including `data/plate_solve/`); to remove them you must opt in (below) + +**Environment variables** + +| Variable | Meaning | +|----------|---------| +| `OGSCOPE_UNINSTALL_CONFIRM=1` | **Required for non-interactive** runs (CI, scripts); without it, the script exits when stdin is not a TTY | +| `OGSCOPE_UNINSTALL_KEEP_VENV=1` | keep `.venv` | +| `OGSCOPE_UNINSTALL_REMOVE_DATA=1` | **dangerous**: deletes `logs/`, `uploads/`, `data/` (user data including plate DB) | + +**Interactive**: if `OGSCOPE_UNINSTALL_CONFIRM=1` is not set and the session is a TTY, type **`YES`** in full caps to proceed. + +```bash +cd /path/to/OGScope +chmod +x scripts/uninstall.sh + +# Interactive: type YES when prompted +./scripts/uninstall.sh + +# Non-interactive +OGSCOPE_UNINSTALL_CONFIRM=1 ./scripts/uninstall.sh + +# Remove service only, keep venv +OGSCOPE_UNINSTALL_CONFIRM=1 OGSCOPE_UNINSTALL_KEEP_VENV=1 ./scripts/uninstall.sh + +# Also remove logs and data (use with care) +OGSCOPE_UNINSTALL_CONFIRM=1 OGSCOPE_UNINSTALL_REMOVE_DATA=1 ./scripts/uninstall.sh +``` + +To deploy again after uninstall, run `./scripts/install.sh`. + ## 7. 
PyCharm Remote Development (Current Practice) Current practice is **local IDE editing + manual deployment to board**, not @@ -264,13 +408,19 @@ If service fails to start, check: - `PYTHONPATH` includes system `dist-packages` - `LD_LIBRARY_PATH` includes `libcamera` library path - code upload is complete and dependencies are installed +- **`No module named 'scipy'`**: `board-update.sh` / `install.sh` verify imports after `poetry install` and retry with `--no-cache` plus a pip fallback; if it still fails, remove `.venv` and run `OGSCOPE_MIRROR=cn ./scripts/board-update.sh` (or `./scripts/install.sh`) ## 11. Command Cheatsheet ```bash +# dev machine; on board use ./scripts/board-update.sh poetry install poetry run python -m ogscope.main sudo systemctl restart ogscope sudo systemctl status ogscope sudo journalctl -u ogscope -f + +# Uninstall service and .venv (see §6.3; requires confirm or OGSCOPE_UNINSTALL_CONFIRM=1) +# ./scripts/uninstall.sh +# OGSCOPE_UNINSTALL_CONFIRM=1 ./scripts/uninstall.sh ``` diff --git a/docs/development/plate-solve-data.md b/docs/development/plate-solve-data.md new file mode 100644 index 0000000..25caac8 --- /dev/null +++ b/docs/development/plate-solve-data.md @@ -0,0 +1,59 @@ +# 星空解算数据维护方案 / Plate solve data maintenance + +本文说明 OGScope 在移除 SQLite/HYG 星表后,**仅依赖 Tetra3(Cedar-Solve)图案库** `default_database.npz` 的部署、备份与排障。 + +## 1. 数据形态 / What the file is + +- **不是**关系型数据库:无 `stars.db`、无 HYG CSV 索引。 +- **`default_database.npz`** 为 NumPy 压缩归档,内含: + - 预计算的 **四星图案哈希表**(用于 lost-in-space 匹配) + - **星表向量**(球面 KD 树等),与构建时所用的 Hipparcos/Tycho/BSC 等相关 +- 运行时由 `tetra3.Tetra3` 加载到内存;解算结果中的 `tetra` 字段会附带 Tetra 原始输出(含 `status`、`Matches`、`RMSE` 等)。 + +源码中 vendored 包位置:`ogscope/vendor/tetra3/`(Apache-2.0,见 `ogscope/vendor/tetra3/LICENSE.txt`)。 + +## 2. 获取图案库文件 / Obtaining `default_database.npz` + +任选其一: + +1. **从 PyPI `cedar-solve` wheel 提取** + 安装 wheel 后,在 site-packages 中查找 `tetra3/data/default_database.npz`,复制到 `data/plate_solve/`。 +2. **自行生成**(换 FOV、极限星等时) + 使用 `tetra3.Tetra3.generate_database()`,并按上游文档准备 `hip_main` / `tyc_main` / `BSC5` 等星表文件(生成耗时可能很长)。 + +## 3. 配置与部署 / Configuration + +| 方式 | 说明 | +|------|------| +| 默认 | 若存在 `data/plate_solve/default_database.npz`,优先通过配置解析为该路径 | +| `OGSCOPE_PLATE_SOLVE_DIR` | 图案库目录(默认 `./data/plate_solve`) | +| `OGSCOPE_SOLVER_TETRA_DATABASE_PATH` | `default_database.npz` 的**绝对路径**(最高优先级) | + +应用配置项见 `ogscope/config.py`:`plate_solve_dir`、`solver_tetra_database_path`、`solver_fov_deg`、`solver_fov_max_error_deg`、`solver_timeout_ms`。 + +**systemd** 部署时:将 `WorkingDirectory` 设为项目根,并确保 `data/plate_solve/default_database.npz` 存在或环境变量指向设备上可读路径。 + +## 4. 版本与备份 / Versioning and backup + +- 在 `poetry.lock` 或发行说明中**记录**与 `ogscope/vendor/tetra3` 对齐的 Cedar-Solve / Tetra3 版本思路(当前为 vendored 快照)。 +- **备份**:对生产用 `default_database.npz` 保留副本(可按文件大小 + SHA256 校验)。 +- **升级**:替换 `.npz` 后重启服务;建议在板子上用调试页上传测试图验证 `status: MATCH_FOUND`。 + +## 5. 与旧方案关系 / Migration from SQLite catalog + +- 已移除:`/api/catalog`、`HYG` 下载、`stars.db`、调试页星表 CRUD。 +- 解算置信度不再来自「本地星表密度」,而来自 Tetra 的 `Prob`、`Matches`、`RMSE` 等。 + +## 6. 故障排查 / Troubleshooting + +| 现象 | 可能原因 | 处理 | +|------|-----------|------| +| `DATABASE_ERROR` / 无法加载 | `.npz` 缺失或路径错误 | 检查文件与 `OGSCOPE_SOLVER_TETRA_DATABASE_PATH` | +| `TOO_FEW` | 检出星点 < 4 | 曝光/阈值、减少云与前景光 | +| `NO_MATCH` / `TIMEOUT` | FOV 与库不匹配、假星多 | 调整 `solver_fov_deg`、星点提取、`solve_timeout_ms` | +| 窄视场长期失败 | 默认库偏 10°–30° 一类 | 使用 `generate_database` 生成匹配 FOV 的库 | + +## 7. 
性能提示 / Performance + +- Orange Pi 等资源受限设备:可适当**降低分辨率**、限制 `solver_max_stars`、拉大 `solver_fullsolve_interval_frames`(实时模式)。 +- Tetra 解算在后台线程执行,避免阻塞事件循环(见 `asyncio.to_thread`)。 diff --git a/ogscope/__init__.py b/ogscope/__init__.py index 191fc54..7508b6a 100644 --- a/ogscope/__init__.py +++ b/ogscope/__init__.py @@ -4,7 +4,14 @@ 基于 Orange Pi Zero 2W 和 IMX327 的智能极轴校准系统 """ +# 使 vendored tetra3 可被 import / Make vendored tetra3 importable +import sys +from pathlib import Path + +_vendor_root = Path(__file__).resolve().parent / "vendor" +if _vendor_root.is_dir() and str(_vendor_root) not in sys.path: + sys.path.insert(0, str(_vendor_root)) + from ogscope.__version__ import __version__ __all__ = ["__version__"] - diff --git a/ogscope/__version__.py b/ogscope/__version__.py index 9834682..bbc8789 100644 --- a/ogscope/__version__.py +++ b/ogscope/__version__.py @@ -1,4 +1,3 @@ """版本信息 / Version information""" __version__ = "0.1.0" - diff --git a/ogscope/algorithms/plate_solve/__init__.py b/ogscope/algorithms/plate_solve/__init__.py index 2d9fcfb..82276d8 100644 --- a/ogscope/algorithms/plate_solve/__init__.py +++ b/ogscope/algorithms/plate_solve/__init__.py @@ -2,6 +2,22 @@ 星图解算模块导出 / Plate solving module exports """ -from ogscope.algorithms.plate_solve.solver import PlateSolver, SolveResult +from ogscope.algorithms.plate_solve.solver import ( + CentroidExtractionParams, + PlateSolver, + SolveResult, + centroid_extraction_preview, + merge_centroid_params, + reset_tetra3_singleton_for_tests, + resize_bgr_for_extraction, +) -__all__ = ["PlateSolver", "SolveResult"] +__all__ = [ + "CentroidExtractionParams", + "PlateSolver", + "SolveResult", + "centroid_extraction_preview", + "merge_centroid_params", + "reset_tetra3_singleton_for_tests", + "resize_bgr_for_extraction", +] diff --git a/ogscope/algorithms/plate_solve/solver.py b/ogscope/algorithms/plate_solve/solver.py index 411238c..7eae0f6 100644 --- a/ogscope/algorithms/plate_solve/solver.py +++ b/ogscope/algorithms/plate_solve/solver.py @@ -1,90 +1,685 @@ """ -简化星图解算器 / Simplified plate solver +基于 Tetra3 (Cedar-Solve) 的星图解算 / Plate solving via Tetra3 (Cedar-Solve) """ from __future__ import annotations -from dataclasses import dataclass -from math import cos, radians +import base64 +import dataclasses +import threading +import time +from dataclasses import dataclass, field +from pathlib import Path from typing import Any +import cv2 import numpy as np +from PIL import Image from ogscope.algorithms.star_extract import StarPoint -from ogscope.data.catalog.service import catalog_service +from ogscope.config import Settings, get_settings + +_STATUS_NAMES: dict[int, str] = { + 1: "MATCH_FOUND", + 2: "NO_MATCH", + 3: "TIMEOUT", + 4: "CANCELLED", + 5: "TOO_FEW", +} + + +def _json_safe(obj: Any) -> Any: + """将 numpy 标量/数组等转为 JSON/FastAPI 可序列化类型 / JSON-serializable conversion for API.""" + if obj is None or isinstance(obj, (str, bool, int, float)): + return obj + if isinstance(obj, np.generic): + return obj.item() + if isinstance(obj, np.ndarray): + return obj.tolist() + if isinstance(obj, dict): + return {str(k): _json_safe(v) for k, v in obj.items()} + if isinstance(obj, (list, tuple)): + return [_json_safe(x) for x in obj] + if isinstance(obj, set): + return [_json_safe(x) for x in obj] + return obj + + +_tetra_lock = threading.Lock() +_tetra_instance: Any = None +_tetra_load_key: str | None = None + + +def _resolve_database_path(settings: Settings) -> Path | str: + """选择图案库路径:环境配置 > data/plate_solve > 包内默认名 / Resolve pattern DB path.""" + if 
settings.solver_tetra_database_path is not None: + return settings.solver_tetra_database_path.expanduser().resolve() + candidate = settings.plate_solve_dir / "default_database.npz" + if candidate.is_file(): + return candidate + return "default_database" + + +def _get_tetra3(settings: Settings) -> Any: + """懒加载单例 / Lazy singleton Tetra3.""" + global _tetra_instance, _tetra_load_key + key = str(_resolve_database_path(settings)) + with _tetra_lock: + if _tetra_instance is not None and _tetra_load_key == key: + return _tetra_instance + from tetra3 import Tetra3 # noqa: PLC0415 — after vendor path + + load_arg: Path | str = _resolve_database_path(settings) + _tetra_instance = Tetra3(load_arg) + _tetra_load_key = key + return _tetra_instance + + +def reset_tetra3_singleton_for_tests() -> None: + """测试用:清空单例 / Tests: clear singleton.""" + global _tetra_instance, _tetra_load_key + with _tetra_lock: + _tetra_instance = None + _tetra_load_key = None + + +@dataclass(slots=True) +class CentroidExtractionParams: + """Tetra3 提星参数 / Parameters for get_centroids_from_image.""" + + sigma: float = 2.5 + max_area: int = 400 + min_area: int = 5 + filtsize: int = 25 + binary_open: bool = True + bg_sub_mode: str = "local_mean" + sigma_mode: str = "global_root_square" + max_axis_ratio: float | None = None + + @classmethod + def from_settings(cls, settings: Settings) -> CentroidExtractionParams: + """从应用配置构造 / Build from application settings.""" + return cls( + sigma=settings.solver_centroid_sigma, + max_area=settings.solver_centroid_max_area, + min_area=settings.solver_centroid_min_area, + filtsize=settings.solver_centroid_filtsize, + binary_open=settings.solver_centroid_binary_open, + bg_sub_mode=settings.solver_centroid_bg_sub_mode, + sigma_mode=settings.solver_centroid_sigma_mode, + max_axis_ratio=settings.solver_centroid_max_axis_ratio, + ) + + def to_get_centroids_kwargs(self) -> dict[str, Any]: + """传给 get_centroids_from_image 的关键字 / Keyword args for Tetra3 centroiding.""" + kwargs: dict[str, Any] = { + "filtsize": self.filtsize, + "bg_sub_mode": self.bg_sub_mode, + "sigma_mode": self.sigma_mode, + "sigma": self.sigma, + "binary_open": self.binary_open, + "max_area": self.max_area, + "min_area": self.min_area, + } + if self.max_axis_ratio is not None: + kwargs["max_axis_ratio"] = self.max_axis_ratio + return kwargs + + +def merge_centroid_params( + base: CentroidExtractionParams, + overrides: dict[str, Any], +) -> CentroidExtractionParams: + """用非 None 字段覆盖 base / Overlay non-None keys onto base.""" + allowed = {f.name for f in dataclasses.fields(CentroidExtractionParams)} + filtered = {k: v for k, v in overrides.items() if k in allowed and v is not None} + return dataclasses.replace(base, **filtered) + + +def resize_bgr_for_extraction( + frame_bgr: np.ndarray, max_image_side: int +) -> tuple[np.ndarray, tuple[int, int]]: + """与解算相同的缩放,返回 (BGR, 原始高宽) / Same resize as solve; returns BGR and original shape.""" + h0, w0 = int(frame_bgr.shape[0]), int(frame_bgr.shape[1]) + img = frame_bgr + if max(h0, w0) > max_image_side: + sc = max_image_side / float(max(h0, w0)) + img = cv2.resize( + frame_bgr, + (max(1, int(w0 * sc)), max(1, int(h0 * sc))), + interpolation=cv2.INTER_AREA, + ) + return img, (h0, w0) + + +def subtract_large_scale_background_bgr( + frame_bgr: np.ndarray, + *, + downsample_max_side: int, +) -> np.ndarray: + """低分辨率估计大尺度背景并做亮度校正,减轻角部光晕等渐变 / Fast large-scale flat removal. 
+ + 在小图上高斯平滑得到低频背景,上采样后与灰度相减,再按比例映射回 BGR,便于 Tetra3 提星。 + Estimates low-frequency background on a downscaled image, subtracts in luminance, scales RGB. + """ + if frame_bgr.ndim != 3 or frame_bgr.shape[2] != 3: + return frame_bgr + h, w = int(frame_bgr.shape[0]), int(frame_bgr.shape[1]) + gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY).astype(np.float32) + side = max(h, w) + sc = min(1.0, float(downsample_max_side) / float(side)) + sw = max(1, int(round(w * sc))) + sh = max(1, int(round(h * sc))) + small = cv2.resize(gray, (sw, sh), interpolation=cv2.INTER_AREA) + sigma_s = max(2.0, float(min(sw, sh)) / 32.0) + bg_small = cv2.GaussianBlur(small, (0, 0), sigmaX=sigma_s, sigmaY=sigma_s) + bg = cv2.resize(bg_small, (w, h), interpolation=cv2.INTER_LINEAR).astype(np.float32) + mean_gray = float(np.mean(gray)) + corr = gray - bg + mean_gray + corr = np.clip(corr, 1e-3, 255.0) + ratio = corr / np.maximum(gray, 1e-3) + ratio = np.clip(ratio, 0.0, 4.0) + out = frame_bgr.astype(np.float32) * ratio[..., np.newaxis] + return np.clip(np.round(out), 0, 255).astype(np.uint8) + + +def centroid_extraction_preview( + frame_bgr: np.ndarray, + *, + max_stars: int, + centroid_params: CentroidExtractionParams, + max_image_side: int, + large_scale_bg_subtract: bool = False, + downsample_max_side: int = 256, +) -> dict[str, Any]: + """提星预览:二值掩膜 PNG(base64),不解算 Tetra3 / Preview extraction mask without plate solve.""" + from tetra3 import get_centroids_from_image # noqa: PLC0415 + + img, (h0, w0) = resize_bgr_for_extraction(frame_bgr, max_image_side) + if large_scale_bg_subtract: + img = subtract_large_scale_background_bgr( + img, downsample_max_side=downsample_max_side + ) + height, width = int(img.shape[0]), int(img.shape[1]) + rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + pil_image = Image.fromarray(rgb) + kwargs = centroid_params.to_get_centroids_kwargs() + t0 = time.perf_counter() + try: + centroids, images_dict = get_centroids_from_image( + pil_image, + max_returned=max_stars, + return_images=True, + **kwargs, + ) + except (OSError, ValueError, RuntimeError) as exc: + return { + "success": False, + "error": str(exc), + "detected_stars": 0, + "t_extract_ms": None, + "binary_mask_png_base64": None, + "solve_width": width, + "solve_height": height, + "original_width": w0, + "original_height": h0, + } + t_extract_ms = (time.perf_counter() - t0) * 1000.0 + detected = int(len(centroids)) + mask = images_dict.get("binary_mask") + b64: str | None = None + if mask is not None: + mask_u8 = (np.asarray(mask, dtype=np.uint8) * 255).astype(np.uint8) + ok, buf = cv2.imencode(".png", mask_u8) + if ok: + b64 = base64.b64encode(buf.tobytes()).decode("ascii") + return { + "success": True, + "detected_stars": detected, + "t_extract_ms": round(t_extract_ms, 3), + "binary_mask_png_base64": b64, + "solve_width": width, + "solve_height": height, + "original_width": w0, + "original_height": h0, + } @dataclass(slots=True) class SolveResult: - """解算结果 / Solving result""" + """解算结果(Tetra3)/ Tetra3 solving result""" ra_deg: float dec_deg: float - confidence: float - solve_source: str - matched_catalog_stars: int detected_stars: int + solve_source: str + status: str + status_code: int | None + roll_deg: float | None + fov_deg: float | None + matches: int | None + prob: float | None + rmse_arcsec: float | None + t_solve_ms: float | None + t_extract_ms: float | None + t_preprocess_ms: float | None + large_scale_bg_subtract: bool = False + raw: dict[str, Any] = field(default_factory=dict) + # 原图像素系下的叠加数据(与 Canvas x,y 一致)/ Overlay in 
original image pixels (Canvas x,y) + solve_overlay: dict[str, Any] | None = None def to_dict(self) -> dict[str, Any]: - return { + base = { "ra_deg": self.ra_deg, "dec_deg": self.dec_deg, - "confidence": self.confidence, - "solve_source": self.solve_source, - "matched_catalog_stars": self.matched_catalog_stars, "detected_stars": self.detected_stars, + "solve_source": self.solve_source, + "status": self.status, + "status_code": self.status_code, + "roll_deg": self.roll_deg, + "fov_deg": self.fov_deg, + "matches": self.matches, + "prob": self.prob, + "rmse_arcsec": self.rmse_arcsec, + "t_solve_ms": self.t_solve_ms, + "t_extract_ms": self.t_extract_ms, + "t_preprocess_ms": self.t_preprocess_ms, + "large_scale_bg_subtract": self.large_scale_bg_subtract, } + if self.solve_overlay is not None: + base["solve_overlay"] = _json_safe(self.solve_overlay) + if self.raw: + # Tetra3 原始字段含 numpy 标量(如 uint16),FastAPI 无法直接 JSON 编码 / Tetra3 raw may contain numpy scalars + base["tetra"] = _json_safe(self.raw) + return base class PlateSolver: - """基于提示位姿与星表密度的轻量解算 / Lightweight solver with hint and catalog density""" + """Tetra3 星图解算封装 / Tetra3 plate solver wrapper""" - def __init__(self, fov_deg: float = 16.0) -> None: + def __init__( + self, + fov_deg: float = 16.0, + fov_max_error_deg: float | None = None, + solve_timeout_ms: int = 8000, + ) -> None: self.fov_deg = fov_deg + self.fov_max_error_deg = fov_max_error_deg + self.solve_timeout_ms = solve_timeout_ms + + def _tetra(self) -> Any: + return _get_tetra3(get_settings()) def solve( self, stars: list[StarPoint], frame_shape: tuple[int, ...], - hint_ra_deg: float, - hint_dec_deg: float, + hint_ra_deg: float = 0.0, + hint_dec_deg: float = 0.0, solve_source: str = "full", + fov_estimate: float | None = None, + fov_max_error: float | None = None, + solve_timeout_ms: int | None = None, ) -> SolveResult: - """解算画面中心坐标 / Solve frame center coordinate""" - if not stars: + """解算画面中心赤道坐标 / Solve frame center RA/Dec. + + hint_ra_deg / hint_dec_deg 对 Tetra3 无影响,仅保留 API 兼容 / Hints ignored by Tetra3. 
+ """ + del hint_ra_deg, hint_dec_deg + height, width = int(frame_shape[0]), int(frame_shape[1]) + fov_est = float(fov_estimate if fov_estimate is not None else self.fov_deg) + fov_err = fov_max_error if fov_max_error is not None else self.fov_max_error_deg + timeout = float( + solve_timeout_ms if solve_timeout_ms is not None else self.solve_timeout_ms + ) + + if len(stars) < 4: + sorted_stars = sorted(stars, key=lambda s: s.flux, reverse=True) + cyx = np.array([[s.y, s.x] for s in sorted_stars], dtype=np.float64) + overlay = ( + _make_solve_overlay({}, cyx, (height, width), (height, width)) + if len(cyx) > 0 + else None + ) return SolveResult( - ra_deg=hint_ra_deg % 360.0, - dec_deg=float(np.clip(hint_dec_deg, -90.0, 90.0)), - confidence=0.0, + ra_deg=0.0, + dec_deg=0.0, + detected_stars=len(stars), solve_source=solve_source, - matched_catalog_stars=0, + status="TOO_FEW", + status_code=5, + roll_deg=None, + fov_deg=None, + matches=None, + prob=None, + rmse_arcsec=None, + t_solve_ms=None, + t_extract_ms=None, + t_preprocess_ms=None, + large_scale_bg_subtract=False, + raw={"reason": "need_at_least_4_stars"}, + solve_overlay=overlay, + ) + + sorted_stars = sorted(stars, key=lambda s: s.flux, reverse=True) + centroids = np.array([[s.y, s.x] for s in sorted_stars], dtype=np.float64) + + try: + t3 = self._tetra() + out = t3.solve_from_centroids( + centroids, + (height, width), + fov_estimate=fov_est, + fov_max_error=fov_err, + solve_timeout=timeout, + return_matches=True, + ) + except OSError as exc: + return SolveResult( + ra_deg=0.0, + dec_deg=0.0, + detected_stars=len(stars), + solve_source=solve_source, + status="DATABASE_ERROR", + status_code=None, + roll_deg=None, + fov_deg=None, + matches=None, + prob=None, + rmse_arcsec=None, + t_solve_ms=None, + t_extract_ms=None, + t_preprocess_ms=None, + large_scale_bg_subtract=False, + raw={"error": str(exc)}, + ) + + return _tetra_dict_to_result( + out, + len(stars), + solve_source, + centroids_yx=centroids, + frame_shape_original=(height, width), + solve_shape=(height, width), + ) + + def solve_from_bgr_frame( + self, + frame_bgr: np.ndarray, + max_stars: int, + hint_ra_deg: float = 0.0, + hint_dec_deg: float = 0.0, + solve_source: str = "full", + fov_estimate: float | None = None, + fov_max_error: float | None = None, + solve_timeout_ms: int | None = None, + max_image_side: int | None = None, + centroid_params: CentroidExtractionParams | None = None, + large_scale_bg_subtract: bool = False, + ) -> SolveResult: + """与 Tetra3 ``solve_from_image`` 等价:内置 ``get_centroids_from_image`` + ``solve_from_centroids``. + + Cedar-Solve / 官方示例走此提星链(局部背景减除、σ 阈值、连通域矩心),非 OpenCV OTSU。 + Same pipeline as Tetra3 ``solve_from_image`` (local bg, sigma threshold, scipy labeling). + 可选在提星前做大尺度背景减除(角部光晕等)/ Optional large-scale BG flattening before centroiding. 
+ """ + del hint_ra_deg, hint_dec_deg + from tetra3 import get_centroids_from_image # noqa: PLC0415 — vendor path + + settings = get_settings() + side_cap = ( + int(max_image_side) + if max_image_side is not None + else int(settings.solver_max_image_side) + ) + params = ( + centroid_params + if centroid_params is not None + else CentroidExtractionParams.from_settings(settings) + ) + t0_preprocess = time.perf_counter() + img, (h0, w0) = resize_bgr_for_extraction(frame_bgr, side_cap) + if large_scale_bg_subtract: + img = subtract_large_scale_background_bgr( + img, + downsample_max_side=int(settings.solver_large_scale_bg_downsample), + ) + height, width = int(img.shape[0]), int(img.shape[1]) + rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + pil_image = Image.fromarray(rgb) + t_preprocess_ms = (time.perf_counter() - t0_preprocess) * 1000.0 + + fov_est = float(fov_estimate if fov_estimate is not None else self.fov_deg) + fov_err = fov_max_error if fov_max_error is not None else self.fov_max_error_deg + timeout = float( + solve_timeout_ms if solve_timeout_ms is not None else self.solve_timeout_ms + ) + + centroid_kw = params.to_get_centroids_kwargs() + t0 = time.perf_counter() + try: + centroids = get_centroids_from_image( + pil_image, + max_returned=max_stars, + **centroid_kw, + ) + except (OSError, ValueError, RuntimeError) as exc: + return SolveResult( + ra_deg=0.0, + dec_deg=0.0, detected_stars=0, + solve_source=solve_source, + status="EXTRACTION_ERROR", + status_code=None, + roll_deg=None, + fov_deg=None, + matches=None, + prob=None, + rmse_arcsec=None, + t_solve_ms=None, + t_extract_ms=None, + t_preprocess_ms=t_preprocess_ms, + large_scale_bg_subtract=large_scale_bg_subtract, + raw={"error": str(exc)}, + ) + t_extract_ms = (time.perf_counter() - t0) * 1000.0 + + detected = int(len(centroids)) + if detected < 4: + cyx = np.asarray(centroids, dtype=np.float64) + overlay = ( + _make_solve_overlay({}, cyx, (h0, w0), (height, width)) + if len(cyx) > 0 + else None + ) + return SolveResult( + ra_deg=0.0, + dec_deg=0.0, + detected_stars=detected, + solve_source=solve_source, + status="TOO_FEW", + status_code=5, + roll_deg=None, + fov_deg=None, + matches=None, + prob=None, + rmse_arcsec=None, + t_solve_ms=None, + t_extract_ms=t_extract_ms, + t_preprocess_ms=t_preprocess_ms, + large_scale_bg_subtract=large_scale_bg_subtract, + raw={"reason": "need_at_least_4_stars"}, + solve_overlay=overlay, ) - height, width = frame_shape[:2] - points = np.array([[s.x, s.y, s.flux] for s in stars], dtype=np.float64) - flux = np.clip(points[:, 2], 1e-6, None) - centroid_x = float(np.average(points[:, 0], weights=flux)) - centroid_y = float(np.average(points[:, 1], weights=flux)) - dx = centroid_x - (width / 2.0) - dy = centroid_y - (height / 2.0) - - deg_per_pixel = self.fov_deg / max(width, 1) - dec = float(np.clip(hint_dec_deg - (dy * deg_per_pixel), -90.0, 90.0)) - cos_dec = max(0.01, cos(radians(dec))) - ra = (hint_ra_deg + (dx * deg_per_pixel / cos_dec)) % 360.0 - - nearby_catalog = catalog_service.load_records_for_region(ra_deg=ra, search_bins=1) - expected_stars = max(1, len(nearby_catalog)) - detected_stars = len(stars) - matched = min(detected_stars, expected_stars) - confidence = min(1.0, detected_stars / expected_stars) - - return SolveResult( - ra_deg=ra, - dec_deg=dec, - confidence=float(confidence), - solve_source=solve_source, - matched_catalog_stars=matched, - detected_stars=detected_stars, + try: + t3 = self._tetra() + out = t3.solve_from_centroids( + centroids, + (height, width), + 
fov_estimate=fov_est, + fov_max_error=fov_err, + solve_timeout=timeout, + return_matches=True, + ) + except OSError as exc: + return SolveResult( + ra_deg=0.0, + dec_deg=0.0, + detected_stars=detected, + solve_source=solve_source, + status="DATABASE_ERROR", + status_code=None, + roll_deg=None, + fov_deg=None, + matches=None, + prob=None, + rmse_arcsec=None, + t_solve_ms=None, + t_extract_ms=t_extract_ms, + t_preprocess_ms=t_preprocess_ms, + large_scale_bg_subtract=large_scale_bg_subtract, + raw={"error": str(exc)}, + ) + + out["T_extract"] = t_extract_ms + out["T_preprocess"] = t_preprocess_ms + return _tetra_dict_to_result( + out, + detected, + solve_source, + centroids_yx=np.asarray(centroids, dtype=np.float64), + frame_shape_original=(h0, w0), + solve_shape=(height, width), + large_scale_bg_subtract=large_scale_bg_subtract, + ) + + +def _make_solve_overlay( + tetra_out: dict[str, Any], + centroids_yx: np.ndarray, + frame_shape_original: tuple[int, int], + solve_shape: tuple[int, int], +) -> dict[str, Any] | None: + """从 Tetra 输出与质心构造 solve_overlay(原图 x,y 像素)/ Build solve_overlay in original pixels.""" + h0, w0 = int(frame_shape_original[0]), int(frame_shape_original[1]) + h1, w1 = int(solve_shape[0]), int(solve_shape[1]) + if h1 <= 0 or w1 <= 0: + return None + sx = w0 / float(w1) + sy = h0 / float(h1) + + stars_all: list[dict[str, float]] = [] + arr = np.asarray(centroids_yx, dtype=np.float64) + if arr.size > 0 and arr.ndim == 2 and arr.shape[1] >= 2: + for row in arr: + y_s, x_s = float(row[0]), float(row[1]) + stars_all.append({"x": x_s * sx, "y": y_s * sy}) + + stars_matched: list[dict[str, Any]] = [] + raw_matched = tetra_out.get("matched_centroids") + raw_catalog = tetra_out.get("matched_stars") + raw_cat_ids = tetra_out.get("matched_catID") + if raw_matched: + for i, mc in enumerate(raw_matched): + y_s, x_s = float(mc[0]), float(mc[1]) + entry: dict[str, Any] = {"x": x_s * sx, "y": y_s * sy} + if raw_catalog is not None and i < len(raw_catalog): + ms = raw_catalog[i] + entry["ra_deg"] = float(ms[0]) + entry["dec_deg"] = float(ms[1]) + entry["mag"] = float(ms[2]) + if raw_cat_ids is not None and i < len(raw_cat_ids): + cid = raw_cat_ids[i] + entry["cat_id"] = _json_safe(cid) if cid is not None else None + stars_matched.append(entry) + + stars_pattern: list[dict[str, float]] = [] + raw_pat = tetra_out.get("pattern_centroids") + if raw_pat: + for pc in raw_pat: + y_s, x_s = float(pc[0]), float(pc[1]) + stars_pattern.append({"x": x_s * sx, "y": y_s * sy}) + + return { + "frame_shape": [h0, w0], + "stars_matched": stars_matched, + "stars_pattern": stars_pattern, + "stars_all_centroids": stars_all, + } + + +def _tetra_dict_to_result( + out: dict[str, Any], + detected_stars: int, + solve_source: str, + *, + centroids_yx: np.ndarray | None = None, + frame_shape_original: tuple[int, int] | None = None, + solve_shape: tuple[int, int] | None = None, + large_scale_bg_subtract: bool = False, +) -> SolveResult: + """Tetra 返回 dict → SolveResult / Map Tetra output dict to SolveResult.""" + st = out.get("status") + code = int(st) if st is not None else None + name = _STATUS_NAMES.get(code, str(st)) if code is not None else "UNKNOWN" + + ra = out.get("RA") + dec = out.get("Dec") + ra_f = float(ra) if ra is not None else 0.0 + dec_f = float(dec) if dec is not None else 0.0 + + raw = {k: v for k, v in out.items() if k not in ("RA", "Dec")} + + overlay: dict[str, Any] | None = None + if ( + centroids_yx is not None + and frame_shape_original is not None + and solve_shape is not None + ): + overlay = 
_make_solve_overlay( + out, centroids_yx, frame_shape_original, solve_shape ) + + return SolveResult( + ra_deg=ra_f, + dec_deg=dec_f, + detected_stars=detected_stars, + solve_source=solve_source, + status=name, + status_code=code, + roll_deg=_maybe_float(out.get("Roll")), + fov_deg=_maybe_float(out.get("FOV")), + matches=_maybe_int(out.get("Matches")), + prob=_maybe_float(out.get("Prob")), + rmse_arcsec=_maybe_float(out.get("RMSE")), + t_solve_ms=_maybe_float(out.get("T_solve")), + t_extract_ms=_maybe_float(out.get("T_extract")), + t_preprocess_ms=_maybe_float(out.get("T_preprocess")), + large_scale_bg_subtract=large_scale_bg_subtract, + raw=raw, + solve_overlay=overlay, + ) + + +def warmup_tetra3() -> None: + """预热 Tetra3 单例与数据库,降低首轮解算延迟 / Warm up Tetra3 singleton to reduce first-solve latency.""" + _get_tetra3(get_settings()) + + +def _maybe_float(v: Any) -> float | None: + if v is None: + return None + try: + return float(v) + except (TypeError, ValueError): + return None + + +def _maybe_int(v: Any) -> int | None: + if v is None: + return None + try: + return int(v) + except (TypeError, ValueError): + return None diff --git a/ogscope/algorithms/star_extract/extractor.py b/ogscope/algorithms/star_extract/extractor.py index 78bd2f8..79ebc22 100644 --- a/ogscope/algorithms/star_extract/extractor.py +++ b/ogscope/algorithms/star_extract/extractor.py @@ -4,6 +4,8 @@ from __future__ import annotations +import heapq +import math from dataclasses import dataclass from typing import Any @@ -27,6 +29,18 @@ def to_dict(self) -> dict[str, Any]: class StarExtractor: """简单星点提取 / Lightweight star extraction""" + # 大图先缩小再提星,降低内存与轮廓数量 / Downscale before extract (RAM + contour count on SBCs) + _max_input_side: int = 1920 + # 椒盐噪声多时 OTSU 轮廓可上万,逐轮廓整幅 mask 会 OOM;过多则缩小重试 / Too many contours → downscale & retry + _max_contours_before_downscale: int = 6000 + _min_side_for_downscale: int = 400 + # 几何过滤:与噪点体积区分,减轻 Tetra3 假星导致的 TIMEOUT / Reject noise blobs vs point-like stars + _min_star_area: float = 2.0 + _max_star_area_frac: float = ( + 0.0035 # 单连通域面积不超过画幅比例 / Max contour area vs frame + ) + _min_circularity: float = 0.12 # 4πA/P²;细长热噪、条纹偏低 / Elongated junk is low + def __init__(self, max_stars: int = 80) -> None: self.max_stars = max_stars @@ -37,25 +51,69 @@ def extract(self, frame: np.ndarray) -> list[StarPoint]: else: gray = frame.copy() - # 使用高斯模糊降低高频噪声 / Apply gaussian blur for high-frequency noise + h, w = gray.shape[:2] + side = max(h, w) + if side > self._max_input_side: + scale0 = self._max_input_side / float(side) + gray = cv2.resize( + gray, + (max(1, int(w * scale0)), max(1, int(h * scale0))), + interpolation=cv2.INTER_AREA, + ) + return self._extract_gray_scaled(gray, scale=1.0) + + def _extract_gray_scaled(self, gray: np.ndarray, scale: float) -> list[StarPoint]: + """在灰度图上提星;scale 为相对原图的坐标倍率 / Extract on gray; scale maps coords to original frame.""" blur = cv2.GaussianBlur(gray, (3, 3), 0) - _, binary = cv2.threshold( - blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU - ) + _, binary = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + # 去掉孤立椒盐点,减少伪轮廓 / Morph open removes salt noise, fewer false contours + _k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2)) + binary = cv2.morphologyEx(binary, cv2.MORPH_OPEN, _k) + contours, _ = cv2.findContours( binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE ) + h, w = gray.shape[:2] + if ( + len(contours) > self._max_contours_before_downscale + and min(h, w) > self._min_side_for_downscale + ): + small = cv2.resize( + gray, + (max(1, 
w // 2), max(1, h // 2)), + interpolation=cv2.INTER_AREA, + ) + return self._extract_gray_scaled(small, scale * 2.0) + + # 已缩到最小仍极多轮廓时只保留面积最大的一批,避免 OOM / Cap contour count if still huge + if len(contours) > self._max_contours_before_downscale: + contours = heapq.nlargest( + self._max_contours_before_downscale, + contours, + key=cv2.contourArea, + ) + + frame_px = float(h * w) + max_area = self._max_star_area_frac * frame_px + points: list[StarPoint] = [] for contour in contours: area = float(cv2.contourArea(contour)) - if area <= 0.5: + if area < self._min_star_area: + continue + if area > max_area: continue + peri = float(cv2.arcLength(contour, True)) + if peri > 0.0: + circ = (4.0 * math.pi * area) / (peri * peri) + if circ < self._min_circularity: + continue m = cv2.moments(contour) if m["m00"] <= 0: continue - cx = float(m["m10"] / m["m00"]) - cy = float(m["m01"] / m["m00"]) + cx = float(m["m10"] / m["m00"]) * scale + cy = float(m["m01"] / m["m00"]) * scale mask = np.zeros_like(gray, dtype=np.uint8) cv2.drawContours(mask, [contour], -1, color=255, thickness=-1) flux = float(cv2.mean(gray, mask=mask)[0] * area) diff --git a/ogscope/algorithms/star_match/tracker.py b/ogscope/algorithms/star_match/tracker.py index c6e025a..5687027 100644 --- a/ogscope/algorithms/star_match/tracker.py +++ b/ogscope/algorithms/star_match/tracker.py @@ -33,15 +33,19 @@ def to_dict(self) -> dict[str, Any]: class FastTracker: """基于质心偏移的轻量跟踪 / Lightweight tracking based on centroid shift""" - def track( - self, previous: list[StarPoint], current: list[StarPoint] - ) -> TrackResult: + def track(self, previous: list[StarPoint], current: list[StarPoint]) -> TrackResult: """估计帧间位移 / Estimate inter-frame shift""" if not previous or not current: - return TrackResult(delta_x=0.0, delta_y=0.0, matched_points=0, confidence=0.0) + return TrackResult( + delta_x=0.0, delta_y=0.0, matched_points=0, confidence=0.0 + ) - prev = np.array([[p.x, p.y, max(p.flux, 1e-6)] for p in previous], dtype=np.float64) - cur = np.array([[p.x, p.y, max(p.flux, 1e-6)] for p in current], dtype=np.float64) + prev = np.array( + [[p.x, p.y, max(p.flux, 1e-6)] for p in previous], dtype=np.float64 + ) + cur = np.array( + [[p.x, p.y, max(p.flux, 1e-6)] for p in current], dtype=np.float64 + ) prev_cx = float(np.average(prev[:, 0], weights=prev[:, 2])) prev_cy = float(np.average(prev[:, 1], weights=prev[:, 2])) cur_cx = float(np.average(cur[:, 0], weights=cur[:, 2])) diff --git a/ogscope/config.py b/ogscope/config.py index b5388b8..c358291 100644 --- a/ogscope/config.py +++ b/ogscope/config.py @@ -1,6 +1,7 @@ """ 配置管理模块 """ + from functools import lru_cache from pathlib import Path from typing import Optional @@ -11,85 +12,153 @@ class Settings(BaseSettings): """应用配置 / Application configuration""" - + # 基础配置 / Basic configuration environment: str = Field(default="development", description="运行环境") debug: bool = Field(default=True, description="调试模式") - + # Web 服务配置 / Web service configuration host: str = Field(default="0.0.0.0", description="Web 服务地址") port: int = Field(default=8000, description="Web 服务端口") reload: bool = Field(default=True, description="代码变更时自动重载") - + # 日志配置 / Log configuration log_level: str = Field(default="INFO", description="日志级别") log_file: Optional[Path] = Field(default=None, description="日志文件路径") - + # 相机配置 / Camera configuration camera_type: str = Field(default="imx327_mipi", description="相机类型: usb/csi/spi") - camera_width: int = Field(default=640, description="图像宽度") - camera_height: int = Field(default=360, 
description="图像高度") - camera_fps: int = Field(default=15, description="帧率") + camera_width: int = Field( + default=1600, description="图像宽度 / Default capture width" + ) + camera_height: int = Field( + default=900, description="图像高度 / Default capture height" + ) + camera_fps: int = Field( + default=5, description="预览与调试默认帧率 / Default preview FPS" + ) camera_sampling_mode: str = Field( default="native", description="采样模式: supersample/native/crop" ) camera_exposure: int = Field(default=10000, description="曝光时间(us)") camera_gain: float = Field(default=1.0, description="增益") - + # 显示屏配置 / Display configuration display_enabled: bool = Field(default=False, description="启用 SPI 屏幕") display_type: str = Field(default="st7789", description="显示屏类型") display_width: int = Field(default=240, description="屏幕宽度") display_height: int = Field(default=320, description="屏幕高度") display_rotation: int = Field(default=0, description="屏幕旋转角度") - + # 极轴校准配置 / Polar calibration configuration polar_align_timeout: int = Field(default=300, description="校准超时时间(秒)") polar_align_precision: float = Field(default=1.0, description="校准精度(角分)") - + # 数据库配置 / Database configuration database_url: str = Field( - default="sqlite:///./ogscope.db", - description="数据库连接字符串" + default="sqlite:///./ogscope.db", description="数据库连接字符串" ) - + # 文件路径配置 / File path configuration data_dir: Path = Field(default=Path("./data"), description="数据目录") upload_dir: Path = Field(default=Path("./uploads"), description="上传目录") analysis_dir: Path = Field( default=Path("./data/analysis"), description="分析任务目录" ) - catalog_dir: Path = Field(default=Path("./data/catalog"), description="星表目录") + plate_solve_dir: Path = Field( + default=Path("./data/plate_solve"), + description="Tetra3 图案库目录 / Tetra3 pattern database directory", + ) + solver_tetra_database_path: Optional[Path] = Field( + default=None, + description="default_database.npz 绝对路径;None 则使用 vendor 内 data/default_database.npz / Absolute path to default_database.npz", + ) + solver_fov_max_error_deg: Optional[float] = Field( + default=None, + description="FOV 估计允许误差(度);None 为库默认 / Max FOV estimate error in degrees", + ) + solver_timeout_ms: int = Field( + default=1500, + description="Tetra3 单次解算超时毫秒 / Tetra3 solve timeout in ms", + ) static_dir: Path = Field(default=Path("./web/static"), description="静态文件目录") template_dir: Path = Field(default=Path("./web/templates"), description="模板目录") # 星图解算配置 / Plate solving configuration solver_hint_ra_deg: float = Field(default=0.0, description="默认解算RA提示(度)") solver_hint_dec_deg: float = Field(default=90.0, description="默认解算Dec提示(度)") - solver_fov_deg: float = Field(default=16.0, description="视场角(度)") + solver_fov_deg: float = Field( + default=11.0, description="视场角(度) / Default FOV estimate (deg)" + ) solver_max_stars: int = Field(default=80, description="用于解算的最大星点数量") solver_fullsolve_interval_frames: int = Field( default=10, description="实时模式全量解算间隔帧数" ) - + # Tetra3 get_centroids_from_image 默认(可环境覆盖)/ Defaults for centroid extraction + solver_centroid_sigma: float = Field( + default=2.5, + description="σ 阈值倍数;略高可减少假星 / Sigma multiplier for thresholding", + ) + solver_centroid_max_area: int = Field( + default=400, + description="连通域最大像素面积;过小会丢掉亮星光晕 / Max spot area in pixels", + ) + solver_centroid_min_area: int = Field( + default=5, + description="连通域最小像素面积 / Min spot area in pixels", + ) + solver_centroid_filtsize: int = Field( + default=25, + description="局部背景/噪声滤波边长,须为奇数 / Local filter size (odd)", + ) + solver_centroid_binary_open: bool = Field( + 
default=True, + description="二值开运算去噪 / Binary opening on threshold mask", + ) + solver_centroid_bg_sub_mode: str = Field( + default="local_mean", + description="背景扣除模式 / Background subtraction mode (Tetra3)", + ) + solver_centroid_sigma_mode: str = Field( + default="global_root_square", + description="噪声 σ 估计模式 / Noise sigma mode (Tetra3)", + ) + solver_centroid_max_axis_ratio: Optional[float] = Field( + default=None, + description="长细比上限;None 为不限制 / Max major/minor axis ratio, None to disable", + ) + solver_max_image_side: int = Field( + default=1600, + description="提星前长边上限(像素),与默认采集长边对齐 / Max long side before extraction", + ) + solver_large_scale_bg_downsample: int = Field( + default=256, + ge=32, + le=2048, + description="大尺度背景减除:小图长边上限(像素),越小越快 / Large-scale BG downsample max side", + ) + star_analysis_target_fps: float = Field( + default=1.5, + description="星空分析目标帧率(1–2),仅用于前端节流 / Target star-analysis FPS for UI throttle", + ) + model_config = SettingsConfigDict( env_file=".env", env_file_encoding="utf-8", env_prefix="OGSCOPE_", case_sensitive=False, ) - + def __init__(self, **kwargs): super().__init__(**kwargs) # 创建必要的目录 / Create necessary directories self.data_dir.mkdir(parents=True, exist_ok=True) self.upload_dir.mkdir(parents=True, exist_ok=True) self.analysis_dir.mkdir(parents=True, exist_ok=True) - self.catalog_dir.mkdir(parents=True, exist_ok=True) + self.plate_solve_dir.mkdir(parents=True, exist_ok=True) -@lru_cache() +@lru_cache def get_settings() -> Settings: """获取配置单例 / Get configuration singleton""" return Settings() - diff --git a/ogscope/core/realtime/service.py b/ogscope/core/realtime/service.py index e40a392..798ab86 100644 --- a/ogscope/core/realtime/service.py +++ b/ogscope/core/realtime/service.py @@ -10,7 +10,6 @@ from ogscope.algorithms.plate_solve import PlateSolver, SolveResult from ogscope.algorithms.star_extract import StarExtractor, StarPoint -from ogscope.algorithms.star_match import FastTracker from ogscope.config import get_settings from ogscope.web.api.debug.services import DebugCameraService @@ -27,30 +26,47 @@ class RealtimeState: class RealtimeSolveService: - """实时解算器 / Realtime solver""" + """实时解算器:周期性 Tetra3 全量解算 / Realtime solver with periodic Tetra3""" def __init__(self) -> None: settings = get_settings() self.extractor = StarExtractor(max_stars=settings.solver_max_stars) - self.solver = PlateSolver(fov_deg=settings.solver_fov_deg) - self.tracker = FastTracker() + self.solver = PlateSolver( + fov_deg=settings.solver_fov_deg, + fov_max_error_deg=settings.solver_fov_max_error_deg, + solve_timeout_ms=settings.solver_timeout_ms, + ) self.state = RealtimeState() self._task: asyncio.Task[None] | None = None self._previous_stars: list[StarPoint] | None = None self._hint_ra = settings.solver_hint_ra_deg self._hint_dec = settings.solver_hint_dec_deg self._fullsolve_interval = max(1, settings.solver_fullsolve_interval_frames) + self._fov_estimate: float | None = None + self._fov_max_error: float | None = None + self._solve_timeout_ms: int | None = None async def start( - self, hint_ra_deg: float | None = None, hint_dec_deg: float | None = None + self, + hint_ra_deg: float | None = None, + hint_dec_deg: float | None = None, + fov_estimate: float | None = None, + fov_max_error: float | None = None, + solve_timeout_ms: int | None = None, ) -> dict[str, Any]: """启动实时解算 / Start realtime solving""" if self.state.running: - return {"success": True, "message": "实时解算已在运行 / Realtime solver already running"} + return { + "success": True, + "message": "实时解算已在运行 / 
Realtime solver already running", + } if hint_ra_deg is not None: self._hint_ra = hint_ra_deg if hint_dec_deg is not None: self._hint_dec = hint_dec_deg + self._fov_estimate = fov_estimate + self._fov_max_error = fov_max_error + self._solve_timeout_ms = solve_timeout_ms self.state = RealtimeState(running=True) self._previous_stars = None self._task = asyncio.create_task(self._loop()) @@ -98,40 +114,36 @@ async def _loop(self) -> None: or self._previous_stars is None ) if use_fullsolve: - solved = self.solver.solve( - stars=stars, - frame_shape=frame.shape, - hint_ra_deg=self._hint_ra, - hint_dec_deg=self._hint_dec, - solve_source="full", + solved = await asyncio.to_thread( + self._solve_frame_sync, + frame, + stars, ) self._apply_solve_result(solved) self.state.fullsolve_count += 1 - else: - track = self.tracker.track(self._previous_stars or [], stars) - deg_per_px = self.solver.fov_deg / max(frame.shape[1], 1) - self._hint_dec = float( - max(-90.0, min(90.0, self._hint_dec - track.delta_y * deg_per_px)) - ) - self._hint_ra = float((self._hint_ra + track.delta_x * deg_per_px) % 360.0) - solved = self.solver.solve( - stars=stars, - frame_shape=frame.shape, - hint_ra_deg=self._hint_ra, - hint_dec_deg=self._hint_dec, - solve_source="track", - ) - base = solved.to_dict() - base["track"] = track.to_dict() - self.state.last_result = base - self._hint_ra = solved.ra_deg - self._hint_dec = solved.dec_deg self._previous_stars = stars await asyncio.sleep(0.02) except Exception as exc: # noqa: BLE001 self.state.last_error = str(exc) await asyncio.sleep(0.1) + def _solve_frame_sync( + self, + frame: Any, + stars: list[StarPoint], + ) -> SolveResult: + """同步解算单帧(线程池中调用)/ Sync solve for one frame.""" + return self.solver.solve( + stars=stars, + frame_shape=frame.shape, + hint_ra_deg=self._hint_ra, + hint_dec_deg=self._hint_dec, + solve_source="realtime", + fov_estimate=self._fov_estimate, + fov_max_error=self._fov_max_error, + solve_timeout_ms=self._solve_timeout_ms, + ) + def _apply_solve_result(self, solved: SolveResult) -> None: """写入解算结果 / Persist solve result""" self.state.last_result = solved.to_dict() diff --git a/ogscope/data/catalog/__init__.py b/ogscope/data/catalog/__init__.py deleted file mode 100644 index 24969d3..0000000 --- a/ogscope/data/catalog/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -""" -星表数据模块导出 / Catalog data module exports -""" - -from ogscope.data.catalog.service import CatalogService, catalog_service - -__all__ = ["CatalogService", "catalog_service"] diff --git a/ogscope/data/catalog/service.py b/ogscope/data/catalog/service.py deleted file mode 100644 index d4aaeae..0000000 --- a/ogscope/data/catalog/service.py +++ /dev/null @@ -1,770 +0,0 @@ -""" -星表数据服务 / Catalog data service -""" - -from __future__ import annotations - -import csv -import hashlib -import json -import logging -import sqlite3 -import subprocess -from dataclasses import dataclass -from datetime import datetime, timezone -from math import cos, radians -from pathlib import Path -from typing import Any -from urllib.request import urlretrieve - -from ogscope.config import get_settings - -logger = logging.getLogger(__name__) - - -@dataclass(slots=True) -class CatalogRecord: - """星表记录 / Catalog record""" - - source_id: str - ra: float - dec: float - pmra: float - pmdec: float - phot_g_mean_mag: float - name_en: str - name_zh: str - description_en: str - description_zh: str - - @classmethod - def from_row(cls, row: dict[str, str]) -> "CatalogRecord": - return cls( - source_id=row["source_id"], - ra=float(row["ra"]), 
- dec=float(row["dec"]), - pmra=float(row.get("pmra", 0.0)), - pmdec=float(row.get("pmdec", 0.0)), - phot_g_mean_mag=float(row["phot_g_mean_mag"]), - name_en=row.get("name_en", row["source_id"]), - name_zh=row.get("name_zh", row.get("name_en", row["source_id"])), - description_en=row.get("description_en", ""), - description_zh=row.get("description_zh", ""), - ) - - def to_dict(self) -> dict[str, Any]: - return { - "source_id": self.source_id, - "ra": self.ra, - "dec": self.dec, - "pmra": self.pmra, - "pmdec": self.pmdec, - "phot_g_mean_mag": self.phot_g_mean_mag, - "name_en": self.name_en, - "name_zh": self.name_zh, - "description_en": self.description_en, - "description_zh": self.description_zh, - } - - -class CatalogService: - """星表服务(SQLite 主存储) / Catalog service with SQLite primary storage""" - - RAW_FILE_NAME = "catalog_raw.csv" - MANIFEST_NAME = "manifest.json" - RAW_DIR_NAME = "raw" - META_DIR_NAME = "meta" - DB_FILE_NAME = "stars.db" - HYG_URL = ( - "https://raw.githubusercontent.com/astronexus/HYG-Database/main/" - "hyg/CURRENT/hygdata_v41.csv" - ) - - _COMMON_CN_NAMES: dict[str, str] = { - "sirius": "天狼星", - "canopus": "老人星", - "arcturus": "大角星", - "vega": "织女星", - "capella": "五车二", - "rigel": "参宿七", - "procyon": "南河三", - "betelgeuse": "参宿四", - "achernar": "水委一", - "hadar": "马腹一", - "altair": "牛郎星", - "aldebaran": "毕宿五", - "spica": "角宿一", - "antares": "心宿二", - "pollux": "北河三", - "fomalhaut": "北落师门", - "deneb": "天津四", - "regulus": "轩辕十四", - "castor": "北河二", - "bellatrix": "参宿五", - "mirfak": "天船三", - "alnilam": "参宿二", - "alnair": "鹤一", - "alioth": "玉衡", - "dubhe": "天枢", - "merak": "天璇", - "polaris": "北极星", - } - - _SEED_ROWS: tuple[dict[str, str], ...] = ( - {"source_id": "hip11767", "ra": "37.95456067", "dec": "89.26410897", "pmra": "44.22", "pmdec": "-11.74", "phot_g_mean_mag": "1.97"}, - {"source_id": "hip32349", "ra": "101.28715533", "dec": "-16.71611586", "pmra": "-546.01", "pmdec": "-1223.07", "phot_g_mean_mag": "-1.46"}, - {"source_id": "hip30438", "ra": "95.98787778", "dec": "-52.69571722", "pmra": "19.93", "pmdec": "23.24", "phot_g_mean_mag": "-0.72"}, - {"source_id": "hip69673", "ra": "213.91530029", "dec": "19.18240917", "pmra": "-1093.39", "pmdec": "-1999.85", "phot_g_mean_mag": "0.03"}, - {"source_id": "hip71683", "ra": "219.89972883", "dec": "-60.83514707", "pmra": "-3606.35", "pmdec": "686.92", "phot_g_mean_mag": "-0.27"}, - {"source_id": "hip91262", "ra": "279.23473479", "dec": "38.78368896", "pmra": "200.94", "pmdec": "286.23", "phot_g_mean_mag": "0.03"}, - {"source_id": "hip113368", "ra": "344.41269272", "dec": "-29.62223628", "pmra": "329.95", "pmdec": "-164.67", "phot_g_mean_mag": "1.16"}, - {"source_id": "hip21421", "ra": "68.98016279", "dec": "16.50930235", "pmra": "24.95", "pmdec": "-14.53", "phot_g_mean_mag": "0.85"}, - {"source_id": "hip65474", "ra": "201.29824762", "dec": "-11.16132218", "pmra": "-109.23", "pmdec": "-73.36", "phot_g_mean_mag": "0.98"}, - {"source_id": "hip80763", "ra": "247.35191583", "dec": "-26.43200231", "pmra": "-8.53", "pmdec": "-23.85", "phot_g_mean_mag": "1.06"}, - ) - - def __init__(self) -> None: - settings = get_settings() - self.catalog_dir = settings.catalog_dir - self.raw_dir = self.catalog_dir / self.RAW_DIR_NAME - self.meta_dir = self.catalog_dir / self.META_DIR_NAME - self.db_path = self.catalog_dir / self.DB_FILE_NAME - self._ensure_dirs() - self._init_db() - - @property - def raw_file(self) -> Path: - return self.raw_dir / self.RAW_FILE_NAME - - @property - def manifest_file(self) -> Path: - return self.meta_dir / 
self.MANIFEST_NAME - - def reconfigure_storage(self, catalog_dir: Path) -> None: - """重新配置存储路径 / Reconfigure storage paths""" - self.catalog_dir = catalog_dir - self.raw_dir = self.catalog_dir / self.RAW_DIR_NAME - self.meta_dir = self.catalog_dir / self.META_DIR_NAME - self.db_path = self.catalog_dir / self.DB_FILE_NAME - self._ensure_dirs() - self._init_db() - - def download_catalog( - self, source: str = "seed", url: str | None = None, magnitude_limit: float = 8.5 - ) -> dict[str, Any]: - """下载或生成星表,并导入数据库 / Download or generate catalog and import to DB""" - if source == "seed": - self._write_seed_catalog(self.raw_file, magnitude_limit=magnitude_limit) - elif source == "hyg": - target_url = url or self.HYG_URL - if target_url.endswith(".gz"): - gz_path = self.raw_dir / "hyg_catalog.csv.gz" - self._download_file(target_url, gz_path) - self._gunzip_file(gz_path, self.raw_file) - else: - self._download_file(target_url, self.raw_file) - elif source == "url": - if not url: - raise ValueError("url source 模式必须提供 URL / URL is required for url source") - self._download_file(url, self.raw_file) - else: - raise ValueError("不支持的 source,允许 seed / hyg / url / Unsupported source") - - imported_count = self._import_csv_to_db(self.raw_file, magnitude_limit, source) - self._set_meta("source", source) - self._set_meta("magnitude_limit", str(magnitude_limit)) - self._set_meta("source_sha256", self._sha256_of_file(self.raw_file)) - self._set_meta("status", "imported") - self._set_meta("last_download_at", datetime.now(timezone.utc).isoformat()) - return { - "success": True, - "source": source, - "path": str(self.raw_file), - "imported_count": imported_count, - "message": "星表已导入数据库 / Catalog imported into SQLite", - } - - def build_index( - self, magnitude_limit: float = 8.5, ra_bin_size_deg: float = 15.0 - ) -> dict[str, Any]: - """构建数据库索引与统计 / Build DB indexes and stats""" - with self._connect() as conn: - conn.execute("CREATE INDEX IF NOT EXISTS idx_stars_ra_now ON stars(ra_now)") - conn.execute("CREATE INDEX IF NOT EXISTS idx_stars_dec_now ON stars(dec_now)") - conn.execute( - "CREATE INDEX IF NOT EXISTS idx_stars_mag ON stars(phot_g_mean_mag)" - ) - conn.execute( - "CREATE INDEX IF NOT EXISTS idx_stars_source_id ON stars(source_id)" - ) - conn.execute("ANALYZE") - - count_row = conn.execute( - "SELECT COUNT(*) AS c FROM stars WHERE phot_g_mean_mag <= ?", - (magnitude_limit,), - ).fetchone() - record_count = int(count_row["c"]) if count_row else 0 - bucket_rows = conn.execute( - "SELECT CAST(ra_now / ? AS INTEGER) AS rb, COUNT(*) AS c " - "FROM stars WHERE phot_g_mean_mag <= ? 
GROUP BY rb", - (ra_bin_size_deg, magnitude_limit), - ).fetchall() - bucket_count = len(bucket_rows) - - now_iso = datetime.now(timezone.utc).isoformat() - manifest = { - "generated_at": now_iso, - "source_file": str(self.raw_file), - "db_path": str(self.db_path), - "magnitude_limit": magnitude_limit, - "ra_bin_size_deg": ra_bin_size_deg, - "record_count": record_count, - "bucket_count": bucket_count, - "source_sha256": self._meta("source_sha256", ""), - "epoch": "JNow(approx)", - "status": "ready", - } - self.manifest_file.write_text( - json.dumps(manifest, ensure_ascii=False, indent=2), encoding="utf-8" - ) - self._set_meta("status", "ready") - self._set_meta("ra_bin_size_deg", str(ra_bin_size_deg)) - self._set_meta("magnitude_limit", str(magnitude_limit)) - self._set_meta("last_build_at", now_iso) - return {"success": True, **manifest} - - def get_status(self) -> dict[str, Any]: - """获取星表状态 / Get catalog status""" - with self._connect() as conn: - row = conn.execute("SELECT COUNT(*) AS c FROM stars").fetchone() - total_count = int(row["c"]) if row else 0 - ready = total_count > 0 and self._meta("status", "") in {"ready", "imported"} - return { - "ready": ready, - "status": self._meta("status", "empty"), - "catalog_dir": str(self.catalog_dir), - "db_path": str(self.db_path), - "source": self._meta("source", ""), - "magnitude_limit": float(self._meta("magnitude_limit", "8.5")), - "ra_bin_size_deg": float(self._meta("ra_bin_size_deg", "15.0")), - "last_download_at": self._meta("last_download_at", ""), - "last_build_at": self._meta("last_build_at", ""), - "record_count": total_count, - } - - def load_records_for_region( - self, ra_deg: float, search_bins: int = 1 - ) -> list[CatalogRecord]: - """按 RA 区域读取星点 / Load stars by RA region""" - if not self.get_status().get("ready"): - return [] - bin_size = float(self._meta("ra_bin_size_deg", "15.0")) - half_width = max(1.0, (search_bins + 1) * bin_size) - ra_center = ra_deg % 360.0 - ra_min = ra_center - half_width - ra_max = ra_center + half_width - - query = ( - "SELECT source_id, ra, dec, pmra, pmdec, phot_g_mean_mag, " - "name_en, name_zh, description_en, description_zh FROM stars " - "WHERE phot_g_mean_mag <= ? AND " - ) - mag_limit = float(self._meta("magnitude_limit", "8.5")) - params: tuple[float, ...] - if ra_min < 0: - query += "(ra_now >= ? OR ra_now <= ?) " - params = (mag_limit, 360.0 + ra_min, ra_max) - elif ra_max >= 360.0: - query += "(ra_now >= ? OR ra_now <= ?) " - params = (mag_limit, ra_min, ra_max - 360.0) - else: - query += "ra_now BETWEEN ? AND ? 
" - params = (mag_limit, ra_min, ra_max) - query += "ORDER BY phot_g_mean_mag ASC LIMIT 500" - - with self._connect() as conn: - rows = conn.execute(query, params).fetchall() - return [ - CatalogRecord( - source_id=str(row["source_id"]), - ra=float(row["ra"]), - dec=float(row["dec"]), - pmra=float(row["pmra"]), - pmdec=float(row["pmdec"]), - phot_g_mean_mag=float(row["phot_g_mean_mag"]), - name_en=str(row["name_en"]), - name_zh=str(row["name_zh"]), - description_en=str(row["description_en"]), - description_zh=str(row["description_zh"]), - ) - for row in rows - ] - - def list_stars( - self, - limit: int = 100, - offset: int = 0, - source_query: str | None = None, - min_mag: float | None = None, - max_mag: float | None = None, - ) -> dict[str, Any]: - """分页查询星点 / List stars with pagination""" - where: list[str] = [] - params: list[Any] = [] - if source_query: - where.append("source_id LIKE ?") - params.append(f"%{source_query}%") - if min_mag is not None: - where.append("phot_g_mean_mag >= ?") - params.append(min_mag) - if max_mag is not None: - where.append("phot_g_mean_mag <= ?") - params.append(max_mag) - where_clause = f"WHERE {' AND '.join(where)}" if where else "" - sql = ( - "SELECT source_id, ra, dec, pmra, pmdec, phot_g_mean_mag, " - "name_en, name_zh, description_en, description_zh, " - "ra_now, dec_now, updated_at " - f"FROM stars {where_clause} ORDER BY phot_g_mean_mag ASC LIMIT ? OFFSET ?" - ) - count_sql = f"SELECT COUNT(*) AS c FROM stars {where_clause}" - params_with_page = [*params, max(1, limit), max(0, offset)] - with self._connect() as conn: - rows = conn.execute(sql, params_with_page).fetchall() - count_row = conn.execute(count_sql, params).fetchone() - return { - "total": int(count_row["c"]) if count_row else 0, - "items": [dict(row) for row in rows], - } - - def get_star(self, source_id: str) -> dict[str, Any] | None: - """按 source_id 查询星点 / Get star by source_id""" - with self._connect() as conn: - row = conn.execute( - "SELECT source_id, ra, dec, pmra, pmdec, phot_g_mean_mag, " - "name_en, name_zh, description_en, description_zh, " - "ra_now, dec_now, updated_at " - "FROM stars WHERE source_id = ?", - (source_id,), - ).fetchone() - return dict(row) if row else None - - def create_star(self, payload: dict[str, Any]) -> dict[str, Any]: - """新增星点 / Create star""" - record = CatalogRecord( - source_id=str(payload["source_id"]), - ra=float(payload["ra"]), - dec=float(payload["dec"]), - pmra=float(payload.get("pmra", 0.0)), - pmdec=float(payload.get("pmdec", 0.0)), - phot_g_mean_mag=float(payload["phot_g_mean_mag"]), - name_en=str(payload.get("name_en", payload["source_id"])), - name_zh=str(payload.get("name_zh", payload.get("name_en", payload["source_id"]))), - description_en=str(payload.get("description_en", "")), - description_zh=str(payload.get("description_zh", "")), - ) - normalized = self._normalize_record_to_observation_epoch(record) - now_iso = datetime.now(timezone.utc).isoformat() - with self._connect() as conn: - conn.execute( - "INSERT INTO stars (source_id, ra, dec, pmra, pmdec, phot_g_mean_mag, " - "name_en, name_zh, description_en, description_zh, " - "ra_now, dec_now, created_at, updated_at) " - "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", - ( - normalized.source_id, - normalized.ra, - normalized.dec, - normalized.pmra, - normalized.pmdec, - normalized.phot_g_mean_mag, - normalized.name_en, - normalized.name_zh, - normalized.description_en, - normalized.description_zh, - normalized.ra, - normalized.dec, - now_iso, - now_iso, - ), - ) - result = 
self.get_star(normalized.source_id) - if not result: - raise ValueError("新增星点失败 / Failed to create star") - return result - - def update_star(self, source_id: str, payload: dict[str, Any]) -> dict[str, Any]: - """更新星点 / Update star""" - existing = self.get_star(source_id) - if not existing: - raise FileNotFoundError("星点不存在 / Star not found") - merged = { - "source_id": source_id, - "ra": payload.get("ra", existing["ra"]), - "dec": payload.get("dec", existing["dec"]), - "pmra": payload.get("pmra", existing["pmra"]), - "pmdec": payload.get("pmdec", existing["pmdec"]), - "phot_g_mean_mag": payload.get( - "phot_g_mean_mag", existing["phot_g_mean_mag"] - ), - "name_en": payload.get("name_en", existing["name_en"]), - "name_zh": payload.get("name_zh", existing["name_zh"]), - "description_en": payload.get("description_en", existing["description_en"]), - "description_zh": payload.get("description_zh", existing["description_zh"]), - } - normalized = self._normalize_record_to_observation_epoch( - CatalogRecord( - source_id=str(merged["source_id"]), - ra=float(merged["ra"]), - dec=float(merged["dec"]), - pmra=float(merged["pmra"]), - pmdec=float(merged["pmdec"]), - phot_g_mean_mag=float(merged["phot_g_mean_mag"]), - name_en=str(merged["name_en"]), - name_zh=str(merged["name_zh"]), - description_en=str(merged["description_en"]), - description_zh=str(merged["description_zh"]), - ) - ) - with self._connect() as conn: - conn.execute( - "UPDATE stars SET ra = ?, dec = ?, pmra = ?, pmdec = ?, phot_g_mean_mag = ?, " - "name_en = ?, name_zh = ?, description_en = ?, description_zh = ?, " - "ra_now = ?, dec_now = ?, updated_at = ? WHERE source_id = ?", - ( - normalized.ra, - normalized.dec, - normalized.pmra, - normalized.pmdec, - normalized.phot_g_mean_mag, - normalized.name_en, - normalized.name_zh, - normalized.description_en, - normalized.description_zh, - normalized.ra, - normalized.dec, - datetime.now(timezone.utc).isoformat(), - source_id, - ), - ) - result = self.get_star(source_id) - if not result: - raise ValueError("更新星点失败 / Failed to update star") - return result - - def delete_star(self, source_id: str) -> bool: - """删除星点 / Delete star""" - with self._connect() as conn: - cursor = conn.execute("DELETE FROM stars WHERE source_id = ?", (source_id,)) - return cursor.rowcount > 0 - - def _ensure_dirs(self) -> None: - self.catalog_dir.mkdir(parents=True, exist_ok=True) - self.raw_dir.mkdir(parents=True, exist_ok=True) - self.meta_dir.mkdir(parents=True, exist_ok=True) - - def _connect(self) -> sqlite3.Connection: - conn = sqlite3.connect(self.db_path) - conn.row_factory = sqlite3.Row - return conn - - def _init_db(self) -> None: - try: - self._create_or_migrate_db() - return - except sqlite3.DatabaseError as exc: - if not self._is_malformed_error(exc): - raise - logger.error( - "检测到星表数据库损坏,准备自动恢复 / Corrupted catalog DB detected, starting auto-recovery: %s", - exc, - ) - - self._recover_malformed_db() - self._create_or_migrate_db() - self._set_meta( - "recovered_from_corruption_at", - datetime.now(timezone.utc).isoformat(), - ) - - def _create_or_migrate_db(self) -> None: - with self._connect() as conn: - conn.execute( - "CREATE TABLE IF NOT EXISTS stars (" - "id INTEGER PRIMARY KEY AUTOINCREMENT, " - "source_id TEXT NOT NULL UNIQUE, " - "ra REAL NOT NULL, " - "dec REAL NOT NULL, " - "pmra REAL NOT NULL DEFAULT 0, " - "pmdec REAL NOT NULL DEFAULT 0, " - "phot_g_mean_mag REAL NOT NULL, " - "name_en TEXT NOT NULL DEFAULT '', " - "name_zh TEXT NOT NULL DEFAULT '', " - "description_en TEXT NOT NULL DEFAULT 
'', " - "description_zh TEXT NOT NULL DEFAULT '', " - "ra_now REAL NOT NULL, " - "dec_now REAL NOT NULL, " - "created_at TEXT NOT NULL, " - "updated_at TEXT NOT NULL" - ")" - ) - conn.execute( - "CREATE TABLE IF NOT EXISTS catalog_meta (" - "key TEXT PRIMARY KEY, " - "value TEXT NOT NULL" - ")" - ) - conn.execute("PRAGMA journal_mode=WAL") - conn.execute("PRAGMA synchronous=NORMAL") - self._migrate_schema() - - @staticmethod - def _is_malformed_error(exc: sqlite3.DatabaseError) -> bool: - message = str(exc).lower() - return ( - "malformed" in message - or "disk image is malformed" in message - or "file is not a database" in message - ) - - def _recover_malformed_db(self) -> None: - timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ") - backup_path = self.catalog_dir / f"stars_corrupt_{timestamp}.db" - db_exists = self.db_path.exists() - if db_exists: - try: - self.db_path.replace(backup_path) - except Exception: - try: - self.db_path.unlink() - except FileNotFoundError: - pass - for sidecar in ( - self.db_path.with_suffix(".db-wal"), - self.db_path.with_suffix(".db-shm"), - ): - try: - sidecar.unlink() - except FileNotFoundError: - pass - logger.warning( - "星表数据库已恢复,原损坏文件备份到: %s / Catalog DB recovered, backup saved at: %s", - str(backup_path) if db_exists else "N/A", - str(backup_path) if db_exists else "N/A", - ) - - def _migrate_schema(self) -> None: - """为旧库补齐新字段 / Add missing columns for legacy DB""" - required_columns = { - "name_en": "TEXT NOT NULL DEFAULT ''", - "name_zh": "TEXT NOT NULL DEFAULT ''", - "description_en": "TEXT NOT NULL DEFAULT ''", - "description_zh": "TEXT NOT NULL DEFAULT ''", - } - with self._connect() as conn: - rows = conn.execute("PRAGMA table_info(stars)").fetchall() - existing = {str(row["name"]) for row in rows} - for column_name, column_spec in required_columns.items(): - if column_name in existing: - continue - conn.execute( - f"ALTER TABLE stars ADD COLUMN {column_name} {column_spec}" - ) - - def _set_meta(self, key: str, value: str) -> None: - with self._connect() as conn: - conn.execute( - "INSERT INTO catalog_meta (key, value) VALUES (?, ?) 
" - "ON CONFLICT(key) DO UPDATE SET value = excluded.value", - (key, value), - ) - - def _meta(self, key: str, default: str) -> str: - with self._connect() as conn: - row = conn.execute( - "SELECT value FROM catalog_meta WHERE key = ?", (key,) - ).fetchone() - return str(row["value"]) if row else default - - def _write_seed_catalog(self, target: Path, magnitude_limit: float) -> None: - rows = [] - for row in self._SEED_ROWS: - if float(row["phot_g_mean_mag"]) > magnitude_limit: - continue - enriched = dict(row) - enriched["name_en"] = row["source_id"] - enriched["name_zh"] = row["source_id"] - enriched["description_en"] = "Seed catalog star / 种子星表星点" - enriched["description_zh"] = "种子星表星点 / Seed catalog star" - rows.append(enriched) - with target.open("w", newline="", encoding="utf-8") as f: - writer = csv.DictWriter( - f, - fieldnames=[ - "source_id", - "ra", - "dec", - "pmra", - "pmdec", - "phot_g_mean_mag", - "name_en", - "name_zh", - "description_en", - "description_zh", - ], - ) - writer.writeheader() - writer.writerows(rows) - - def _import_csv_to_db( - self, csv_file: Path, magnitude_limit: float, source: str - ) -> int: - dedup_source_ids: set[str] = set() - now_iso = datetime.now(timezone.utc).isoformat() - imported_count = 0 - with csv_file.open("r", encoding="utf-8") as f, self._connect() as conn: - reader = csv.DictReader(f) - for raw in reader: - try: - record = self._record_from_raw_row(raw, source) - except (KeyError, ValueError): - continue - if record.source_id in dedup_source_ids: - continue - if not (-90.0 <= record.dec <= 90.0 and 0.0 <= record.ra <= 360.0): - continue - if record.phot_g_mean_mag > magnitude_limit: - continue - normalized = self._normalize_record_to_observation_epoch(record) - conn.execute( - "INSERT INTO stars (source_id, ra, dec, pmra, pmdec, phot_g_mean_mag, " - "name_en, name_zh, description_en, description_zh, " - "ra_now, dec_now, created_at, updated_at) " - "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
" - "ON CONFLICT(source_id) DO UPDATE SET " - "ra = excluded.ra, dec = excluded.dec, pmra = excluded.pmra, pmdec = excluded.pmdec, " - "phot_g_mean_mag = excluded.phot_g_mean_mag, " - "name_en = excluded.name_en, name_zh = excluded.name_zh, " - "description_en = excluded.description_en, description_zh = excluded.description_zh, " - "ra_now = excluded.ra_now, dec_now = excluded.dec_now, updated_at = excluded.updated_at", - ( - normalized.source_id, - normalized.ra, - normalized.dec, - normalized.pmra, - normalized.pmdec, - normalized.phot_g_mean_mag, - normalized.name_en, - normalized.name_zh, - normalized.description_en, - normalized.description_zh, - normalized.ra, - normalized.dec, - now_iso, - now_iso, - ), - ) - dedup_source_ids.add(record.source_id) - imported_count += 1 - return imported_count - - def _normalize_record_to_observation_epoch(self, record: CatalogRecord) -> CatalogRecord: - now_year = datetime.now(timezone.utc).year - years = max(0.0, float(now_year - 2016)) - dec_offset_deg = (record.pmdec * years) / 3_600_000.0 - corrected_dec = max(-90.0, min(90.0, record.dec + dec_offset_deg)) - cos_dec = max(0.01, cos(radians(corrected_dec))) - ra_offset_deg = (record.pmra * years) / 3_600_000.0 / cos_dec - corrected_ra = (record.ra + ra_offset_deg) % 360.0 - return CatalogRecord( - source_id=record.source_id, - ra=corrected_ra, - dec=corrected_dec, - pmra=record.pmra, - pmdec=record.pmdec, - phot_g_mean_mag=record.phot_g_mean_mag, - name_en=record.name_en, - name_zh=record.name_zh, - description_en=record.description_en, - description_zh=record.description_zh, - ) - - def _record_from_raw_row(self, raw: dict[str, str], source: str) -> CatalogRecord: - """将来源行转为统一记录 / Convert source row to common record""" - if source == "hyg": - return self._record_from_hyg_row(raw) - return CatalogRecord.from_row(raw) - - def _record_from_hyg_row(self, raw: dict[str, str]) -> CatalogRecord: - """解析 HYG 行 / Parse HYG row""" - raw_id = raw.get("id") or raw.get("hip") or raw.get("hd") or raw.get("hr") - if not raw_id: - raise ValueError("missing id") - source_id = f"hyg_{str(raw_id).strip()}" - ra_hours = float(raw.get("ra", "0") or 0.0) - ra_deg = (ra_hours * 15.0) % 360.0 - dec_deg = float(raw.get("dec", "0") or 0.0) - pmra = float(raw.get("pmra", "0") or 0.0) - pmdec = float(raw.get("pmdec", "0") or 0.0) - mag = float(raw.get("mag", "99") or 99.0) - proper_name = (raw.get("proper") or "").strip() - bayer_name = (raw.get("bf") or "").strip() - name_en = proper_name or bayer_name or source_id - name_zh = self._COMMON_CN_NAMES.get(name_en.lower(), name_en) - constellation = (raw.get("con") or "").strip() - description_en = ( - f"Star {name_en}; mag={mag:.2f}; constellation={constellation or 'unknown'}." 
- ) - description_zh = ( - f"恒星{name_zh};星等={mag:.2f};星座={constellation or '未知'}。" - ) - return CatalogRecord( - source_id=source_id, - ra=ra_deg, - dec=dec_deg, - pmra=pmra, - pmdec=pmdec, - phot_g_mean_mag=mag, - name_en=name_en, - name_zh=name_zh, - description_en=description_en, - description_zh=description_zh, - ) - - def _download_file(self, url: str, target: Path) -> None: - """下载文件,支持 urllib/curl 回退 / Download file with urllib/curl fallback""" - try: - urlretrieve(url, target) # noqa: S310 - controlled by API input - return - except Exception: - pass - result = subprocess.run( - ["curl", "-L", "--fail", "-o", str(target), url], - check=False, - capture_output=True, - text=True, - ) - if result.returncode != 0: - raise RuntimeError( - f"下载失败 / Download failed: {result.stderr.strip() or result.stdout.strip()}" - ) - - @staticmethod - def _gunzip_file(src: Path, dst: Path) -> None: - """解压 gzip 文件 / Decompress gzip file""" - import gzip - import shutil - - with gzip.open(src, "rb") as reader, dst.open("wb") as writer: - shutil.copyfileobj(reader, writer) - - @staticmethod - def _sha256_of_file(path: Path) -> str: - digest = hashlib.sha256() - with path.open("rb") as f: - for chunk in iter(lambda: f.read(8192), b""): - digest.update(chunk) - return digest.hexdigest() - - -catalog_service = CatalogService() diff --git a/ogscope/hardware/camera.py b/ogscope/hardware/camera.py index 533b304..3ad7931 100644 --- a/ogscope/hardware/camera.py +++ b/ogscope/hardware/camera.py @@ -1,86 +1,96 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ 相机驱动模块 支持 Raspberry Pi Zero 2W 的 MIPI CSI 接口 IMX327 相机 """ import logging -from typing import Optional, Tuple, Dict, Any -import numpy as np from abc import ABC, abstractmethod +from typing import Any, Optional + +import numpy as np logger = logging.getLogger(__name__) class CameraInterface(ABC): """相机接口抽象类 / Camera interface abstract class""" - + @abstractmethod def initialize(self) -> bool: """初始化相机 / Initialize camera""" pass - + @abstractmethod def start_capture(self) -> bool: """开始图像捕获 / Start image capture""" pass - + @abstractmethod def stop_capture(self) -> bool: """停止图像捕获 / Stop image capture""" pass - + @abstractmethod def capture_image(self) -> Optional[np.ndarray]: """捕获单张图像 / Capture a single image""" pass - + @abstractmethod def set_exposure(self, exposure_us: int) -> bool: """设置曝光时间 / Set exposure time""" pass - + @abstractmethod def set_gain(self, analogue_gain: float, digital_gain: float = 1.0) -> bool: """设置增益 / Set gain""" pass - + @abstractmethod - def get_camera_info(self) -> Dict[str, Any]: + def get_camera_info(self) -> dict[str, Any]: """获取相机信息 / Get camera information""" pass class IMX327MIPICamera(CameraInterface): """IMX327 MIPI 相机驱动 - 基于 Picamera2 / IMX327 MIPI camera driver - based on Picamera2""" + SENSOR_MAX_WIDTH = 1920 SENSOR_MAX_HEIGHT = 1020 PREVIEW_BUFFER_COUNT = 2 - - def __init__(self, config: Dict[str, Any]): + MANUAL_CONTROL_RANGE_DEFAULTS = { + "ExposureTime": {"min": 1000, "max": 100000, "default": 10000, "step": 1000}, + "AnalogueGain": {"min": 1.0, "max": 16.0, "default": 1.0, "step": 0.1}, + "DigitalGain": {"min": 1.0, "max": 4.0, "default": 1.0, "step": 0.1}, + } + + def __init__(self, config: dict[str, Any]): self.config = config self.camera = None self.is_initialized = False self.is_capturing = False - + # 相机参数 / Camera parameters - requested_width = int(config.get('width', 640)) - requested_height = int(config.get('height', 360)) - self.width, self.height = 
self._sanitize_output_resolution(requested_width, requested_height) - self.fps = config.get('fps', 5) - self.exposure_us = config.get('exposure_us', 10000) - self.analogue_gain = config.get('analogue_gain', 1.0) - self.digital_gain = config.get('digital_gain', 1.0) - self.auto_exposure = config.get('auto_exposure', False) - self.auto_gain = config.get('auto_gain', False) - self.rotation = config.get('rotation', 0) - self.color_mode = config.get('color_mode', 'color') # 'color' | 'mono' - self.white_balance_mode = config.get('white_balance_mode', 'auto') - self.white_balance_gain_r = config.get('white_balance_gain_r', 1.0) - self.white_balance_gain_b = config.get('white_balance_gain_b', 1.0) + requested_width = int(config.get("width", 640)) + requested_height = int(config.get("height", 360)) + self.width, self.height = self._sanitize_output_resolution( + requested_width, requested_height + ) + self.fps = config.get("fps", 5) + self.exposure_us = config.get("exposure_us", 10000) + self.analogue_gain = config.get("analogue_gain", 1.0) + self.digital_gain = config.get("digital_gain", 1.0) + self.auto_exposure = config.get("auto_exposure", False) + self.auto_gain = config.get("auto_gain", False) + self.rotation = config.get("rotation", 0) + self.color_mode = config.get("color_mode", "color") # 'color' | 'mono' + self.white_balance_mode = config.get("white_balance_mode", "auto") + self.white_balance_gain_r = config.get("white_balance_gain_r", 1.0) + self.white_balance_gain_b = config.get("white_balance_gain_b", 1.0) # 采样模式与尺寸(supersample: 采集分辨率可高于输出分辨率) / Sampling mode and size (supersample: acquisition resolution can be higher than output resolution) - self.sampling_mode = config.get('sampling_mode', 'supersample') # supersample | native | crop + self.sampling_mode = config.get( + "sampling_mode", "supersample" + ) # supersample | native | crop ( self.sampling_mode, self.capture_width, @@ -88,15 +98,135 @@ def __init__(self, config: Dict[str, Any]): self.output_width, self.output_height, ) = self._resolve_sampling_layout(self.sampling_mode, self.width, self.height) - - logger.info(f"初始化 IMX327 MIPI 相机: {self.width}x{self.height}@{self.fps}fps") - + + logger.info( + f"初始化 IMX327 MIPI 相机: {self.width}x{self.height}@{self.fps}fps" + ) + + @staticmethod + def _to_number(value: Any) -> Optional[float]: + try: + if value is None: + return None + if isinstance(value, bool): + return None + if isinstance(value, (int, float)): + return float(value) + return float(value) + except (TypeError, ValueError): + return None + + def _parse_control_descriptor(self, descriptor: Any) -> dict[str, float]: + parsed: dict[str, float] = {} + + if isinstance(descriptor, dict): + for key in ("min", "max", "default", "step"): + numeric_value = self._to_number(descriptor.get(key)) + if numeric_value is not None: + parsed[key] = numeric_value + return parsed + + if isinstance(descriptor, (tuple, list)): + if len(descriptor) >= 1: + min_value = self._to_number(descriptor[0]) + if min_value is not None: + parsed["min"] = min_value + if len(descriptor) >= 2: + max_value = self._to_number(descriptor[1]) + if max_value is not None: + parsed["max"] = max_value + if len(descriptor) >= 3: + default_value = self._to_number(descriptor[2]) + if default_value is not None: + parsed["default"] = default_value + if len(descriptor) >= 4: + step_value = self._to_number(descriptor[3]) + if step_value is not None: + parsed["step"] = step_value + return parsed + + for target_key, attr_names in { + "min": ("min", "minimum", "lower", 
"lower_bound"), + "max": ("max", "maximum", "upper", "upper_bound"), + "default": ("default",), + "step": ("step", "increment"), + }.items(): + for attr_name in attr_names: + raw_value = getattr(descriptor, attr_name, None) + numeric_value = self._to_number(raw_value) + if numeric_value is not None: + parsed[target_key] = numeric_value + break + + return parsed + + def _extract_control_range( + self, control_name: str, default_range: dict[str, float] + ) -> dict[str, float]: + result = dict(default_range) + if not self.camera: + return result + + controls = getattr(self.camera, "camera_controls", None) or {} + descriptor = controls.get(control_name) + if descriptor is None: + return result + + parsed = self._parse_control_descriptor(descriptor) + for key in ("min", "max", "default", "step"): + if key in parsed: + result[key] = parsed[key] + + min_value = result.get("min") + max_value = result.get("max") + if ( + isinstance(min_value, (int, float)) + and isinstance(max_value, (int, float)) + and min_value > max_value + ): + result["min"], result["max"] = max_value, min_value + + return result + + def get_manual_control_ranges(self) -> dict[str, dict[str, Any]]: + exposure = self._extract_control_range( + "ExposureTime", self.MANUAL_CONTROL_RANGE_DEFAULTS["ExposureTime"] + ) + analogue = self._extract_control_range( + "AnalogueGain", self.MANUAL_CONTROL_RANGE_DEFAULTS["AnalogueGain"] + ) + digital = self._extract_control_range( + "DigitalGain", self.MANUAL_CONTROL_RANGE_DEFAULTS["DigitalGain"] + ) + return { + "exposure_us": { + "min": int(round(exposure["min"])), + "max": int(round(exposure["max"])), + "default": int(round(exposure["default"])), + "step": max(1, int(round(exposure.get("step", 1)))), + }, + "analogue_gain": { + "min": float(analogue["min"]), + "max": float(analogue["max"]), + "default": float(analogue["default"]), + "step": float(analogue.get("step", 0.1)), + }, + "digital_gain": { + "min": float(digital["min"]), + "max": float(digital["max"]), + "default": float(digital["default"]), + "step": float(digital.get("step", 0.1)), + "supported": "DigitalGain" + in (getattr(self.camera, "camera_controls", {}) or {}), + }, + } + @staticmethod def _align_even(value: int) -> int: value = max(2, int(value)) return value if value % 2 == 0 else value - 1 - def _sanitize_output_resolution(self, width: int, height: int) -> Tuple[int, int]: + def _sanitize_output_resolution(self, width: int, height: int) -> tuple[int, int]: safe_w = min(self.SENSOR_MAX_WIDTH, max(160, int(width))) safe_h = min(self.SENSOR_MAX_HEIGHT, max(120, int(height))) safe_w = self._align_even(safe_w) @@ -109,7 +239,7 @@ def _sanitize_output_resolution(self, width: int, height: int) -> Tuple[int, int def _resolve_sampling_layout( self, mode: str, output_width: int, output_height: int - ) -> Tuple[str, int, int, int, int]: + ) -> tuple[str, int, int, int, int]: output_width, output_height = self._sanitize_output_resolution( output_width, output_height ) @@ -120,9 +250,7 @@ def _resolve_sampling_layout( if mode == "supersample" and ( output_width >= capture_w or output_height >= capture_h ): - logger.warning( - "当前分辨率下超采样无有效增益,自动切换为 native 模式" - ) + logger.warning("当前分辨率下超采样无有效增益,自动切换为 native 模式") mode = "native" return mode, capture_w, capture_h, output_width, output_height @@ -160,26 +288,29 @@ def _resize_preserve_fov( cv2.BORDER_CONSTANT, value=border_value, ) - + def initialize(self) -> bool: """初始化 MIPI 相机 / Initialize MIPI camera""" try: from picamera2 import Picamera2 - + self.camera = Picamera2() - + # 
统一使用RGB888格式,颜色模式转换在图像处理阶段进行 / RGB888 format is uniformly used, and color mode conversion is performed in the image processing stage. # 这样可以保持相机配置的一致性,避免格式兼容性问题 / This maintains consistency in camera configuration and avoids format compatibility issues main_format = "RGB888" - + # 配置相机 / Configure camera camera_config = self.camera.create_video_configuration( - main={"size": (self.capture_width, self.capture_height), "format": main_format}, + main={ + "size": (self.capture_width, self.capture_height), + "format": main_format, + }, buffer_count=self.PREVIEW_BUFFER_COUNT, ) - + self.camera.configure(camera_config) - + # 设置相机控制参数 / Set camera control parameters # 构建控制参数,兼容部分固件未提供 DigitalGain 的情况 / Build control parameters, compatible with some firmwares that do not provide DigitalGain controls = { @@ -194,29 +325,34 @@ def initialize(self) -> bool: except Exception: # DigitalGain 不被支持时,退化为不设置该项 / When DigitalGain is not supported, it will degenerate to not setting this item. self.camera.set_controls(controls) - + self.is_initialized = True logger.info("IMX327 MIPI 相机初始化成功") return True - + except ImportError: - logger.error("Picamera2 库未安装,请运行: sudo apt install python3-picamera2") + logger.error( + "Picamera2 库未安装,请运行: sudo apt install python3-picamera2" + ) return False except Exception as e: logger.error(f"相机初始化失败: {e}") return False - + def start_capture(self) -> bool: """开始图像捕获 / Start image capture""" if not self.is_initialized: logger.error("相机未初始化") return False - + try: # 使用视频配置以获得更高实时性 / Use video configuration for greater real-time performance try: video_config = self.camera.create_video_configuration( - main={"size": (self.capture_width, self.capture_height), "format": "RGB888"}, + main={ + "size": (self.capture_width, self.capture_height), + "format": "RGB888", + }, buffer_count=self.PREVIEW_BUFFER_COUNT, ) self.camera.configure(video_config) @@ -239,7 +375,9 @@ def start_capture(self) -> bool: "AnalogueGain": self.analogue_gain, } try: - self.camera.set_controls({**controls, "DigitalGain": self.digital_gain}) + self.camera.set_controls( + {**controls, "DigitalGain": self.digital_gain} + ) except Exception: self.camera.set_controls(controls) except Exception as e: @@ -252,12 +390,12 @@ def start_capture(self) -> bool: except Exception as e: logger.error(f"启动相机失败: {e}") return False - + def stop_capture(self) -> bool: """停止图像捕获 / Stop image capture""" if not self.is_capturing: return True - + try: self.camera.stop() self.is_capturing = False @@ -266,30 +404,33 @@ def stop_capture(self) -> bool: except Exception as e: logger.error(f"停止相机失败: {e}") return False - + def capture_image(self) -> Optional[np.ndarray]: """捕获单张图像 / Capture a single image""" if not self.is_initialized: logger.error("相机未初始化") return None - + if not self.is_capturing: logger.error("相机未在捕获状态") return None - + try: # 捕获图像 / capture image image = self.camera.capture_array() - + # 如果是 RAW 格式,需要转换为 RGB / If it is RAW format, it needs to be converted to RGB if len(image.shape) == 2: # RAW 格式 / RAW format # 这里需要实现 RAW 到 RGB 的转换 / Here you need to implement RAW to RGB conversion # 暂时返回原始数据 / Temporarily return to original data pass - + # 输出重采样(保持最大视野) / Output resampling (preserve maximum field of view) try: - if (self.output_width, self.output_height) != (image.shape[1], image.shape[0]): + if (self.output_width, self.output_height) != ( + image.shape[1], + image.shape[0], + ): original_shape = image.shape[:2] image = self._resize_preserve_fov( image, @@ -305,18 +446,19 @@ def capture_image(self) -> 
Optional[np.ndarray]: # 应用旋转 / Apply rotation if self.rotation != 0: image = self.apply_rotation(image, self.rotation) - + # 应用颜色模式转换 / Apply color mode conversion - if self.color_mode == 'mono' and len(image.shape) == 3: + if self.color_mode == "mono" and len(image.shape) == 3: # 将彩色图像转换为灰度 / Convert color image to grayscale import cv2 + gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # 转换为3通道灰度图像(保持兼容性) / Convert to 3-channel grayscale image (maintain compatibility) image = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB) logger.debug("应用黑白模式转换") - + return image - + except Exception as e: logger.error(f"捕获图像失败: {e}") return None @@ -343,12 +485,14 @@ def get_video_frame(self) -> Optional[np.ndarray]: return None return self.capture_image() - def set_resolution(self, width: int, height: int, fps: Optional[int] = None) -> bool: + def set_resolution( + self, width: int, height: int, fps: Optional[int] = None + ) -> bool: """运行时切换分辨率 / Switch resolution at runtime""" if not self.is_initialized: logger.error("相机未初始化") return False - + new_w, new_h = self._sanitize_output_resolution(int(width), int(height)) if fps is not None: self.fps = int(fps) @@ -390,13 +534,19 @@ def set_resolution(self, width: int, height: int, fps: Optional[int] = None) -> return False try: video_config = self.camera.create_video_configuration( - main={"size": (self.capture_width, self.capture_height), "format": "RGB888"}, + main={ + "size": (self.capture_width, self.capture_height), + "format": "RGB888", + }, buffer_count=self.PREVIEW_BUFFER_COUNT, ) self.camera.configure(video_config) except Exception: still_cfg = self.camera.create_still_configuration( - main={"size": (self.capture_width, self.capture_height), "format": "RGB888"} + main={ + "size": (self.capture_width, self.capture_height), + "format": "RGB888", + } ) self.camera.configure(still_cfg) try: @@ -434,13 +584,13 @@ def set_fps(self, fps: int) -> bool: except Exception as e: logger.error(f"设置帧率失败: {e}") return False - + def set_exposure(self, exposure_us: int) -> bool: """设置曝光时间 / Set exposure time""" if not self.is_initialized: logger.error("相机未初始化") return False - + try: self.camera.set_controls({"AeEnable": False, "ExposureTime": exposure_us}) self.exposure_us = exposure_us @@ -450,13 +600,13 @@ def set_exposure(self, exposure_us: int) -> bool: except Exception as e: logger.error(f"设置曝光时间失败: {e}") return False - + def set_gain(self, analogue_gain: float, digital_gain: float = 1.0) -> bool: """设置增益 / Set gain""" if not self.is_initialized: logger.error("相机未初始化") return False - + try: # 手动设置增益时显式关闭自动曝光,避免控制冲突 / Explicitly turn off automatic exposure when setting gain manually to avoid control conflicts try: @@ -466,14 +616,11 @@ def set_gain(self, analogue_gain: float, digital_gain: float = 1.0) -> bool: # 优先同时设置,若不支持 DigitalGain 则退化仅设置 AnalogueGain / Priority is given to setting both at the same time. If DigitalGain is not supported, only AnalogueGain is set. 
try: - self.camera.set_controls({ - "AnalogueGain": analogue_gain, - "DigitalGain": digital_gain - }) + self.camera.set_controls( + {"AnalogueGain": analogue_gain, "DigitalGain": digital_gain} + ) except Exception: - self.camera.set_controls({ - "AnalogueGain": analogue_gain - }) + self.camera.set_controls({"AnalogueGain": analogue_gain}) self.analogue_gain = analogue_gain self.digital_gain = digital_gain self.auto_exposure = False @@ -500,7 +647,9 @@ def set_auto_exposure(self, enabled: bool) -> bool: "AnalogueGain": self.analogue_gain, } try: - self.camera.set_controls({**controls, "DigitalGain": self.digital_gain}) + self.camera.set_controls( + {**controls, "DigitalGain": self.digital_gain} + ) except Exception: self.camera.set_controls(controls) @@ -509,17 +658,17 @@ def set_auto_exposure(self, enabled: bool) -> bool: except Exception as e: logger.error(f"设置自动曝光失败: {e}") return False - + def set_rotation(self, rotation: int) -> bool: """设置图像旋转角度 / Set image rotation angle""" if not self.is_initialized: logger.error("相机未初始化") return False - + if rotation not in [0, 90, 180, 270]: logger.error(f"不支持的旋转角度: {rotation}") return False - + self.rotation = rotation logger.info(f"图像旋转角度设置为: {rotation}度") return True @@ -529,8 +678,8 @@ def set_sampling_mode(self, mode: str) -> bool: if not self.is_initialized: logger.error("相机未初始化") return False - - if mode not in ['supersample', 'native', 'crop']: + + if mode not in ["supersample", "native", "crop"]: logger.error(f"不支持的采样模式: {mode}") return False try: @@ -554,9 +703,8 @@ def set_sampling_mode(self, mode: str) -> bool: self.height = output_h logger.info(f"采样模式从 {old_mode} 切换到: {self.sampling_mode}") - need_reconfig = ( - (old_capture_w != self.capture_width) - or (old_capture_h != self.capture_height) + need_reconfig = (old_capture_w != self.capture_width) or ( + old_capture_h != self.capture_height ) if not need_reconfig: return True @@ -567,13 +715,19 @@ def set_sampling_mode(self, mode: str) -> bool: try: video_config = self.camera.create_video_configuration( - main={"size": (self.capture_width, self.capture_height), "format": "RGB888"}, + main={ + "size": (self.capture_width, self.capture_height), + "format": "RGB888", + }, buffer_count=self.PREVIEW_BUFFER_COUNT, ) self.camera.configure(video_config) except Exception: still_cfg = self.camera.create_still_configuration( - main={"size": (self.capture_width, self.capture_height), "format": "RGB888"} + main={ + "size": (self.capture_width, self.capture_height), + "format": "RGB888", + } ) self.camera.configure(still_cfg) @@ -584,17 +738,17 @@ def set_sampling_mode(self, mode: str) -> bool: if was_capturing: return self.start_capture() - + return True except Exception as e: logger.error(f"设置采样模式失败: {e}") return False - - def get_camera_info(self) -> Dict[str, Any]: + + def get_camera_info(self) -> dict[str, Any]: """获取相机信息 / Get camera information""" if not self.is_initialized: return {} - + try: camera_properties = self.camera.camera_properties return { @@ -618,12 +772,13 @@ def get_camera_info(self) -> Dict[str, Any]: "white_balance_mode": self.white_balance_mode, "white_balance_gain_r": self.white_balance_gain_r, "white_balance_gain_b": self.white_balance_gain_b, + "control_ranges": self.get_manual_control_ranges(), } except Exception as e: logger.error(f"获取相机信息失败: {e}") return {} - - def get_image_quality_metrics(self) -> Dict[str, Any]: + + def get_image_quality_metrics(self) -> dict[str, Any]: """获取图像质量指标 / Get image quality metrics""" if not self.is_initialized: return { @@ -632,25 
+787,29 @@ def get_image_quality_metrics(self) -> Dict[str, Any]: "gain_level": 0.0, "night_mode": False, "recommended_adjustments": ["相机未初始化"], - "camera_params": {} + "camera_params": {}, } - + try: # 计算增益水平(模拟增益 + 数字增益) / Calculate gain level (analog gain + digital gain) gain_level = self.analogue_gain * self.digital_gain - + # 根据曝光时间判断夜间模式 / Determine night mode based on exposure time - night_mode = self.exposure_us > 30000 # 曝光时间超过30ms认为是夜间模式 / Exposure time longer than 30ms is considered night mode - + night_mode = ( + self.exposure_us > 30000 + ) # 曝光时间超过30ms认为是夜间模式 / Exposure time longer than 30ms is considered night mode + # 计算曝光充足度(基于曝光时间) / Calculate exposure adequacy (based on exposure time) # 假设10ms为基准曝光时间 / Assume 10ms as the base exposure time exposure_adequacy = min(1.0, self.exposure_us / 10000.0) - + # 计算噪点水平(基于增益和曝光时间) / Calculate noise level (based on gain and exposure time) # 增益越高,噪点越多;曝光时间越长,噪点也越多 / The higher the gain, the more noise; the longer the exposure time, the more noise - noise_level = min(1.0, (gain_level - 1.0) * 0.1 + (self.exposure_us - 10000) / 100000.0) + noise_level = min( + 1.0, (gain_level - 1.0) * 0.1 + (self.exposure_us - 10000) / 100000.0 + ) noise_level = max(0.0, noise_level) - + # 生成调整建议 / Generate adjustment suggestions recommendations = [] if noise_level > 0.7: @@ -661,7 +820,7 @@ def get_image_quality_metrics(self) -> Dict[str, Any]: recommendations.append("增益过高,建议降低增益以提高图像质量") if not recommendations: recommendations.append("图像质量良好,无需调整") - + return { "noise_level": round(noise_level, 3), "exposure_adequacy": round(exposure_adequacy, 3), @@ -672,14 +831,14 @@ def get_image_quality_metrics(self) -> Dict[str, Any]: "exposure_us": self.exposure_us, "analogue_gain": self.analogue_gain, "digital_gain": self.digital_gain, - "noise_reduction": getattr(self, 'noise_reduction', 0), + "noise_reduction": getattr(self, "noise_reduction", 0), "width": self.width, "height": self.height, "fps": self.fps, - "sampling_mode": self.sampling_mode - } + "sampling_mode": self.sampling_mode, + }, } - + except Exception as e: logger.error(f"获取图像质量指标失败: {e}") return { @@ -688,15 +847,15 @@ def get_image_quality_metrics(self) -> Dict[str, Any]: "gain_level": 0.0, "night_mode": False, "recommended_adjustments": [f"获取质量指标失败: {str(e)}"], - "camera_params": {} + "camera_params": {}, } - + def set_noise_reduction(self, level: int) -> bool: """设置降噪级别 (0-4) / Set noise reduction level (0-4)""" if not self.is_initialized: logger.error("相机未初始化") return False - + try: # 将级别映射到相机控制参数 / Map levels to camera control parameters noise_reduction_mode = min(max(level, 0), 4) @@ -706,33 +865,33 @@ def set_noise_reduction(self, level: int) -> bool: except Exception as e: logger.error(f"设置降噪级别失败: {e}") return False - - def set_white_balance(self, mode: str, gain_r: float = 1.0, gain_b: float = 1.0) -> bool: + + def set_white_balance( + self, mode: str, gain_r: float = 1.0, gain_b: float = 1.0 + ) -> bool: """设置白平衡模式 / Set white balance mode""" if not self.is_initialized: logger.error("相机未初始化") return False - + try: if mode == "auto": self.camera.set_controls({"AwbEnable": True}) self.white_balance_mode = "auto" logger.info("白平衡设置为自动模式") elif mode == "manual": - self.camera.set_controls({ - "AwbEnable": False, - "ColourGains": (gain_r, gain_b) - }) + self.camera.set_controls( + {"AwbEnable": False, "ColourGains": (gain_r, gain_b)} + ) self.white_balance_mode = "manual" self.white_balance_gain_r = gain_r self.white_balance_gain_b = gain_b logger.info(f"白平衡设置为手动模式: R={gain_r}, 
B={gain_b}") elif mode == "night": # 夜间模式:稍微偏暖色调 / Night mode: Slightly warmer tones - self.camera.set_controls({ - "AwbEnable": False, - "ColourGains": (1.1, 0.9) - }) + self.camera.set_controls( + {"AwbEnable": False, "ColourGains": (1.1, 0.9)} + ) self.white_balance_mode = "night" self.white_balance_gain_r = 1.1 self.white_balance_gain_b = 0.9 @@ -740,120 +899,141 @@ def set_white_balance(self, mode: str, gain_r: float = 1.0, gain_b: float = 1.0) else: logger.error(f"不支持的白平衡模式: {mode}") return False - + return True except Exception as e: logger.error(f"设置白平衡失败: {e}") return False - - def set_image_enhancement(self, contrast: float = 1.0, brightness: float = 0.0, - saturation: float = 1.0, sharpness: float = 1.0) -> bool: + + def set_image_enhancement( + self, + contrast: float = 1.0, + brightness: float = 0.0, + saturation: float = 1.0, + sharpness: float = 1.0, + ) -> bool: """设置图像增强参数 / Set image enhancement parameters""" if not self.is_initialized: logger.error("相机未初始化") return False - + try: # 构建增强参数 / Build enhancement parameters enhancement_controls = {} - + # 对比度 (0.5-2.0) / Contrast (0.5-2.0) if 0.5 <= contrast <= 2.0: enhancement_controls["Contrast"] = contrast - + # 亮度 (-1.0 到 1.0) / Brightness (-1.0 to 1.0) if -1.0 <= brightness <= 1.0: enhancement_controls["Brightness"] = brightness - + # 饱和度 (0.0-2.0) / Saturation (0.0-2.0) if 0.0 <= saturation <= 2.0: enhancement_controls["Saturation"] = saturation - + # 锐度 (0.0-2.0) / Sharpness (0.0-2.0) if 0.0 <= sharpness <= 2.0: enhancement_controls["Sharpness"] = sharpness - + if enhancement_controls: self.camera.set_controls(enhancement_controls) - logger.info(f"图像增强参数设置: 对比度={contrast}, 亮度={brightness}, 饱和度={saturation}, 锐度={sharpness}") + logger.info( + f"图像增强参数设置: 对比度={contrast}, 亮度={brightness}, 饱和度={saturation}, 锐度={sharpness}" + ) return True else: logger.warning("所有增强参数都在有效范围外") return False - + except Exception as e: logger.error(f"设置图像增强参数失败: {e}") return False - + def set_night_mode(self, enabled: bool) -> bool: """设置夜间模式 / Set night mode""" if not self.is_initialized: logger.error("相机未初始化") return False - + try: if enabled: # 夜间模式:提高增益,延长曝光时间,调整白平衡 / Night mode: increase gain, extend exposure time, adjust white balance - self.camera.set_controls({ - "ExposureTime": max(self.exposure_us, 30000), # 至少30ms / At least 30ms - "AnalogueGain": max(self.analogue_gain, 4.0), # 至少4x增益 / At least 4x gain - "AwbEnable": False, - "ColourGains": (1.1, 0.9), # 偏暖色调 / warmer tones - "NoiseReductionMode": 2 # 中等降噪 / Moderate noise reduction - }) + self.camera.set_controls( + { + "ExposureTime": max( + self.exposure_us, 30000 + ), # 至少30ms / At least 30ms + "AnalogueGain": max( + self.analogue_gain, 4.0 + ), # 至少4x增益 / At least 4x gain + "AwbEnable": False, + "ColourGains": (1.1, 0.9), # 偏暖色调 / warmer tones + "NoiseReductionMode": 2, # 中等降噪 / Moderate noise reduction + } + ) logger.info("夜间模式已启用") else: # 关闭夜间模式:恢复默认设置 / Turn off night mode: restore default settings - self.camera.set_controls({ - "ExposureTime": self.exposure_us, - "AnalogueGain": self.analogue_gain, - "AwbEnable": True, # 恢复自动白平衡 / Restore automatic white balance - "NoiseReductionMode": 0 # 关闭降噪 / Turn off noise reduction - }) + self.camera.set_controls( + { + "ExposureTime": self.exposure_us, + "AnalogueGain": self.analogue_gain, + "AwbEnable": True, # 恢复自动白平衡 / Restore automatic white balance + "NoiseReductionMode": 0, # 关闭降噪 / Turn off noise reduction + } + ) logger.info("夜间模式已关闭") - + return True except Exception as e: logger.error(f"设置夜间模式失败: {e}") return False - + 
def set_color_mode(self, color_mode: str) -> bool: """设置颜色模式 - 需要重新初始化相机 / Set color mode - camera reinitialization required""" - if color_mode not in ['color', 'mono']: + if color_mode not in ["color", "mono"]: logger.error(f"不支持的颜色模式: {color_mode}") return False - + if self.color_mode == color_mode: logger.info(f"颜色模式已经是 {color_mode}") return True - + try: # 停止当前捕获 / Stop current capture was_capturing = self.is_capturing if was_capturing: self.stop_capture() - + # 更新颜色模式 / Update color mode self.color_mode = color_mode - + # 对于颜色模式,我们统一使用RGB888格式,在图像处理阶段进行转换 / For color mode, we uniformly use the RGB888 format and convert it during the image processing stage. # 这样可以保持相机配置的一致性,避免格式兼容性问题 / This maintains consistency in camera configuration and avoids format compatibility issues main_format = "RGB888" - + camera_config = self.camera.create_still_configuration( - main={"size": (self.capture_width, self.capture_height), "format": main_format}, - raw={"size": (self.capture_width, self.capture_height), "format": "SRGGB12"} + main={ + "size": (self.capture_width, self.capture_height), + "format": main_format, + }, + raw={ + "size": (self.capture_width, self.capture_height), + "format": "SRGGB12", + }, ) - + self.camera.configure(camera_config) - + # 如果之前在捕获,重新开始 / If capturing before, start again if was_capturing: self.start_capture() - + logger.info(f"颜色模式已切换为: {color_mode}") return True - + except Exception as e: logger.error(f"设置颜色模式失败: {e}") return False @@ -861,9 +1041,11 @@ def set_color_mode(self, color_mode: str) -> bool: class CameraFactory: """相机工厂类 / Camera factory class""" - + @staticmethod - def create_camera(camera_type: str, config: Dict[str, Any]) -> Optional[CameraInterface]: + def create_camera( + camera_type: str, config: dict[str, Any] + ) -> Optional[CameraInterface]: """创建相机实例 / Create camera instance""" if camera_type == "imx327_mipi": return IMX327MIPICamera(config) @@ -873,7 +1055,7 @@ def create_camera(camera_type: str, config: Dict[str, Any]) -> Optional[CameraIn # 兼容性函数,用于平滑迁移 / Compatibility function for smooth migration -def create_camera(config: Dict[str, Any]) -> Optional[CameraInterface]: +def create_camera(config: dict[str, Any]) -> Optional[CameraInterface]: """创建相机的便捷函数 / Convenience functions for creating cameras""" camera_type = config.get("type", "imx327_mipi") return CameraFactory.create_camera(camera_type, config) diff --git a/ogscope/hardware/gpio_config.py b/ogscope/hardware/gpio_config.py index 3110a26..c3b4041 100644 --- a/ogscope/hardware/gpio_config.py +++ b/ogscope/hardware/gpio_config.py @@ -1,19 +1,19 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ GPIO 配置模块 适配 Raspberry Pi Zero 2W 的引脚布局 """ import logging -from typing import Dict, Any, Optional from enum import Enum +from typing import Any, Optional logger = logging.getLogger(__name__) class PinMode(Enum): """引脚模式 / pin mode""" + INPUT = "input" OUTPUT = "output" PWM = "pwm" @@ -23,23 +23,22 @@ class PinMode(Enum): class RaspberryPiZero2WGPIO: """树莓派 Zero 2W GPIO 配置 / Raspberry Pi Zero 2W GPIO configuration""" - + # GPIO 引脚定义 (BCM 编号) / GPIO pin definition (BCM number) GPIO_PINS = { # 电源引脚 / power pin - "3V3": 1, # 3.3V 电源 / 3.3V power supply - "5V": 2, # 5V 电源 / 5V power supply - "GND": 6, # 地线 / Ground wire - + "3V3": 1, # 3.3V 电源 / 3.3V power supply + "5V": 2, # 5V 电源 / 5V power supply + "GND": 6, # 地线 / Ground wire # GPIO 引脚 / GPIO pin - "GPIO2": 2, # SDA (I2C) - "GPIO3": 3, # SCL (I2C) - "GPIO4": 4, # 通用 GPIO / General purpose GPIO - "GPIO5": 5, # 通用 GPIO / General purpose GPIO - 
"GPIO6": 6, # 通用 GPIO / General purpose GPIO - "GPIO7": 7, # SPI_CE1 - "GPIO8": 8, # SPI_CE0 - "GPIO9": 9, # SPI_MISO + "GPIO2": 2, # SDA (I2C) + "GPIO3": 3, # SCL (I2C) + "GPIO4": 4, # 通用 GPIO / General purpose GPIO + "GPIO5": 5, # 通用 GPIO / General purpose GPIO + "GPIO6": 6, # 通用 GPIO / General purpose GPIO + "GPIO7": 7, # SPI_CE1 + "GPIO8": 8, # SPI_CE0 + "GPIO9": 9, # SPI_MISO "GPIO10": 10, # SPI_MOSI "GPIO11": 11, # SPI_CLK "GPIO12": 12, # 通用 GPIO / General purpose GPIO @@ -59,67 +58,67 @@ class RaspberryPiZero2WGPIO: "GPIO26": 26, # 通用 GPIO / General purpose GPIO "GPIO27": 27, # 通用 GPIO / General purpose GPIO } - + # SPI 接口配置 / SPI interface configuration SPI_CONFIG = { - "bus": 0, # SPI 总线 / SPI bus - "device": 0, # SPI 设备 / SPI device - "clock_pin": 11, # GPIO11 - SCLK - "miso_pin": 9, # GPIO9 - MISO - "mosi_pin": 10, # GPIO10 - MOSI - "cs_pin": 8, # GPIO8 - CS0 - "cs1_pin": 7, # GPIO7 - CS1 - "speed": 8000000, # SPI 时钟频率 (8MHz) / SPI clock frequency (8MHz) + "bus": 0, # SPI 总线 / SPI bus + "device": 0, # SPI 设备 / SPI device + "clock_pin": 11, # GPIO11 - SCLK + "miso_pin": 9, # GPIO9 - MISO + "mosi_pin": 10, # GPIO10 - MOSI + "cs_pin": 8, # GPIO8 - CS0 + "cs1_pin": 7, # GPIO7 - CS1 + "speed": 8000000, # SPI 时钟频率 (8MHz) / SPI clock frequency (8MHz) } - + # I2C 接口配置 / I2C interface configuration I2C_CONFIG = { - "bus": 1, # I2C 总线 / I2C bus - "sda_pin": 2, # GPIO2 - SDA - "scl_pin": 3, # GPIO3 - SCL - "address": 0x3C, # 默认 I2C 地址 / Default I2C address - "speed": 100000, # I2C 时钟频率 (100kHz) / I2C clock frequency (100kHz) + "bus": 1, # I2C 总线 / I2C bus + "sda_pin": 2, # GPIO2 - SDA + "scl_pin": 3, # GPIO3 - SCL + "address": 0x3C, # 默认 I2C 地址 / Default I2C address + "speed": 100000, # I2C 时钟频率 (100kHz) / I2C clock frequency (100kHz) } - + # 显示屏 SPI 配置 / Display SPI configuration DISPLAY_SPI_CONFIG = { "bus": 0, "device": 0, - "dc_pin": 25, # 数据 / data - "rst_pin": 27, # 复位引脚 / reset pin - "cs_pin": 8, # 片选引脚 / Chip select pin - "backlight_pin": 18, # 背光控制 (PWM) / Backlight control (PWM) + "dc_pin": 25, # 数据 / data + "rst_pin": 27, # 复位引脚 / reset pin + "cs_pin": 8, # 片选引脚 / Chip select pin + "backlight_pin": 18, # 背光控制 (PWM) / Backlight control (PWM) "width": 240, "height": 320, "rotation": 0, } - + # 按键 GPIO 配置 / Button GPIO configuration BUTTON_CONFIG = { - "button1_pin": 4, # 按键1 / Button 1 - "button2_pin": 5, # 按键2 / Button 2 - "button3_pin": 6, # 按键3 / Button 3 + "button1_pin": 4, # 按键1 / Button 1 + "button2_pin": 5, # 按键2 / Button 2 + "button3_pin": 6, # 按键3 / Button 3 "button4_pin": 12, # 按键4 / Button 4 - "pull_up": True, # 内部上拉 / Internal pull-up + "pull_up": True, # 内部上拉 / Internal pull-up "debounce_ms": 50, # 防抖时间 / Anti-shake time } - + # LED 配置 / LED configuration LED_CONFIG = { - "status_led_pin": 16, # 状态 LED / Status LED + "status_led_pin": 16, # 状态 LED / Status LED "activity_led_pin": 20, # 活动 LED / Activity LED - "error_led_pin": 21, # 错误 LED / Error LED + "error_led_pin": 21, # 错误 LED / Error LED } class GPIOConfig: """GPIO 配置管理类 / GPIO configuration management class""" - - def __init__(self, config: Dict[str, Any]): + + def __init__(self, config: dict[str, Any]): self.config = config self.gpio_config = RaspberryPiZero2WGPIO() self._validate_config() - + def _validate_config(self): """验证配置 / Verify configuration""" # 检查显示屏配置 / Check display configuration @@ -129,99 +128,105 @@ def _validate_config(self): for pin in required_pins: if pin not in display_config: logger.warning(f"显示屏配置缺少 {pin}") - + # 检查按键配置 / Check button configuration button_config = 
self.config.get("buttons", {}) if button_config.get("enabled", False): # 验证按键引脚配置 / Verify button pin configuration pass - - def get_display_config(self) -> Dict[str, Any]: + + def get_display_config(self) -> dict[str, Any]: """获取显示屏配置 / Get display configuration""" display_config = self.config.get("display", {}) if not display_config.get("enabled", False): return {} - + # 合并默认配置和用户配置 / Merge default configuration and user configuration config = self.gpio_config.DISPLAY_SPI_CONFIG.copy() config.update(display_config) - + return config - - def get_button_config(self) -> Dict[str, Any]: + + def get_button_config(self) -> dict[str, Any]: """获取按键配置 / Get button configuration""" button_config = self.config.get("buttons", {}) if not button_config.get("enabled", False): return {} - + config = self.gpio_config.BUTTON_CONFIG.copy() config.update(button_config) - + return config - - def get_led_config(self) -> Dict[str, Any]: + + def get_led_config(self) -> dict[str, Any]: """获取 LED 配置 / Get LED configuration""" led_config = self.config.get("leds", {}) if not led_config.get("enabled", False): return {} - + config = self.gpio_config.LED_CONFIG.copy() config.update(led_config) - + return config - - def get_spi_config(self) -> Dict[str, Any]: + + def get_spi_config(self) -> dict[str, Any]: """获取 SPI 配置 / Get SPI configuration""" return self.gpio_config.SPI_CONFIG.copy() - - def get_i2c_config(self) -> Dict[str, Any]: + + def get_i2c_config(self) -> dict[str, Any]: """获取 I2C 配置 / Get I2C configuration""" return self.gpio_config.I2C_CONFIG.copy() - + def validate_pin(self, pin_name: str) -> bool: """验证引脚名称是否有效 / Verify that the pin name is valid""" return pin_name in self.gpio_config.GPIO_PINS - + def get_pin_number(self, pin_name: str) -> Optional[int]: """获取引脚编号 / Get pin number""" return self.gpio_config.GPIO_PINS.get(pin_name) - + def get_all_used_pins(self) -> list: """获取所有已使用的引脚 / Get all used pins""" used_pins = [] - + # 显示屏引脚 / Display pins display_config = self.get_display_config() if display_config: - used_pins.extend([ - display_config.get("dc_pin"), - display_config.get("rst_pin"), - display_config.get("cs_pin"), - display_config.get("backlight_pin"), - ]) - + used_pins.extend( + [ + display_config.get("dc_pin"), + display_config.get("rst_pin"), + display_config.get("cs_pin"), + display_config.get("backlight_pin"), + ] + ) + # 按键引脚 / Button pin button_config = self.get_button_config() if button_config: - used_pins.extend([ - button_config.get("button1_pin"), - button_config.get("button2_pin"), - button_config.get("button3_pin"), - button_config.get("button4_pin"), - ]) - + used_pins.extend( + [ + button_config.get("button1_pin"), + button_config.get("button2_pin"), + button_config.get("button3_pin"), + button_config.get("button4_pin"), + ] + ) + # LED 引脚 / LED pin led_config = self.get_led_config() if led_config: - used_pins.extend([ - led_config.get("status_led_pin"), - led_config.get("activity_led_pin"), - led_config.get("error_led_pin"), - ]) - + used_pins.extend( + [ + led_config.get("status_led_pin"), + led_config.get("activity_led_pin"), + led_config.get("error_led_pin"), + ] + ) + # 移除 None 值 / Remove None values used_pins = [pin for pin in used_pins if pin is not None] - + return used_pins diff --git a/ogscope/main.py b/ogscope/main.py index 4f951ce..e82bbb5 100644 --- a/ogscope/main.py +++ b/ogscope/main.py @@ -1,10 +1,9 @@ """ OGScope 主程序入口 """ + import asyncio import sys -from pathlib import Path -from typing import Optional import uvicorn from loguru import logger @@ -18,14 +17,14 
@@ def setup_environment() -> Settings: """初始化环境 / Initialize environment""" # 加载配置 / Load configuration settings = get_settings() - + # 配置日志 / Configuration log setup_logging(settings.log_level, settings.log_file) - + logger.info(f"OGScope v{__version__} 启动中...") logger.info(f"运行环境: {settings.environment}") logger.info(f"日志级别: {settings.log_level}") - + return settings @@ -33,15 +32,15 @@ async def main() -> int: """主函数 / main function""" try: settings = setup_environment() - + # TODO: 初始化各个模块 / TODO: Initialize each module # - 相机模块 / - camera module # - 显示模块 / - Display module # - 算法模块 / - Algorithm module - + # 启动 FastAPI Web 服务 / Start the FastAPI web service logger.info(f"启动 Web 服务: http://{settings.host}:{settings.port}") - + config = uvicorn.Config( "ogscope.web.app:app", host=settings.host, @@ -51,9 +50,9 @@ async def main() -> int: ) server = uvicorn.Server(config) await server.serve() - + return 0 - + except KeyboardInterrupt: logger.info("收到退出信号 (Ctrl+C)") return 0 @@ -72,4 +71,3 @@ def cli() -> None: if __name__ == "__main__": cli() - diff --git a/ogscope/utils/environment.py b/ogscope/utils/environment.py index 92b473e..cbc58be 100644 --- a/ogscope/utils/environment.py +++ b/ogscope/utils/environment.py @@ -2,63 +2,57 @@ 环境检测模块 检测是否在树莓派环境中运行 """ + +import importlib.util import os import platform -import subprocess from pathlib import Path -from typing import Optional def is_raspberry_pi() -> bool: """ 检测是否在树莓派环境中运行 - + Returns: bool: 如果是树莓派环境返回True,否则返回False """ try: # 方法1: 检查 / Method 1: Check if Path("/proc/cpuinfo").exists(): - with open("/proc/cpuinfo", "r") as f: + with open("/proc/cpuinfo") as f: cpuinfo = f.read() if "BCM" in cpuinfo or "Raspberry Pi" in cpuinfo: return True - + # 方法2: 检查 / Method 2: Check if Path("/proc/device-tree/model").exists(): - with open("/proc/device-tree/model", "r") as f: + with open("/proc/device-tree/model") as f: model = f.read() if "Raspberry Pi" in model: return True - + # 方法3: 检查环境变量 / Method 3: Check environment variables if os.environ.get("RASPBERRY_PI") == "1": return True - - # 方法4: 检查是否存在树莓派特有的GPIO库 / Method 4: Check if there is a Raspberry Pi-specific GPIO library - try: - import RPi.GPIO + + # 方法4: 检查是否存在树莓派特有的GPIO库 / Method 4: Raspberry Pi GPIO module + if importlib.util.find_spec("RPi.GPIO") is not None: return True - except ImportError: - pass - - # 方法5: 检查是否存在picamera2库 / 方法5: 检查是否存在picamera2库 - try: - import picamera2 + + # 方法5: 检查是否存在 picamera2 / Method 5: picamera2 module + if importlib.util.find_spec("picamera2") is not None: return True - except ImportError: - pass - + except Exception: pass - + return False def get_device_info() -> dict: """ 获取设备信息 - + Returns: dict: 设备信息字典 """ @@ -69,13 +63,13 @@ def get_device_info() -> dict: "is_raspberry_pi": is_raspberry_pi(), "python_version": platform.python_version(), } - + # 如果是Linux系统,尝试获取更多信息 / If it is a Linux system, try to get more information if platform.system() == "Linux": try: # 获取CPU信息 / Get CPU information if Path("/proc/cpuinfo").exists(): - with open("/proc/cpuinfo", "r") as f: + with open("/proc/cpuinfo") as f: cpuinfo = f.read() if "Hardware" in cpuinfo: for line in cpuinfo.split("\n"): @@ -87,26 +81,26 @@ def get_device_info() -> dict: if line.startswith("Model"): info["model"] = line.split(":")[1].strip() break - + # 获取内存信息 / Get memory information if Path("/proc/meminfo").exists(): - with open("/proc/meminfo", "r") as f: + with open("/proc/meminfo") as f: meminfo = f.read() for line in meminfo.split("\n"): if line.startswith("MemTotal"): info["memory_total"] = 
line.split(":")[1].strip() break - + except Exception: pass - + return info def get_camera_capabilities() -> dict: """ 获取相机能力信息 - + Returns: dict: 相机能力信息 """ @@ -116,18 +110,16 @@ def get_camera_capabilities() -> dict: "has_usb_camera": False, "available_cameras": [], } - - # 检查picamera2 / Check picamera2 - try: - import picamera2 + + # 检查 picamera2 / Check picamera2 + if importlib.util.find_spec("picamera2") is not None: capabilities["has_picamera2"] = True capabilities["available_cameras"].append("picamera2") - except ImportError: - pass - + # 检查OpenCV相机 / Check OpenCV camera try: import cv2 + cap = cv2.VideoCapture(0) if cap.isOpened(): capabilities["has_opencv_camera"] = True @@ -135,10 +127,11 @@ def get_camera_capabilities() -> dict: cap.release() except Exception: pass - + # 检查USB相机 / Check USB camera try: import cv2 + for i in range(5): # 检查前5个设备 / Check top 5 devices cap = cv2.VideoCapture(i) if cap.isOpened(): @@ -148,25 +141,25 @@ def get_camera_capabilities() -> dict: break except Exception: pass - + return capabilities def should_use_simulation_mode() -> bool: """ 判断是否应该使用模拟模式 - + Returns: bool: 如果应该使用模拟模式返回True """ # 强制使用模拟模式的环境变量 / Environment variables to force use of simulation mode if os.environ.get("OGSCOPE_SIMULATION_MODE") == "1": return True - + # 强制禁用模拟模式的环境变量 / Environment variable to force disabling of simulation mode if os.environ.get("OGSCOPE_SIMULATION_MODE") == "0": return False - + # 默认逻辑:非树莓派环境使用模拟模式 / Default logic: non-Raspberry Pi environments use simulation mode return not is_raspberry_pi() @@ -174,7 +167,7 @@ def should_use_simulation_mode() -> bool: def get_simulation_config() -> dict: """ 获取模拟模式配置 - + Returns: dict: 模拟模式配置 """ @@ -185,7 +178,10 @@ def get_simulation_config() -> dict: "virtual_exposure": 10000, # 微秒 / microseconds "virtual_gain": 1.0, "star_field_density": 0.1, # 星点密度 / Star point density - "polar_star_position": (0.5, 0.3), # 极轴星位置 (x, y) / Polar star position (x, y) + "polar_star_position": ( + 0.5, + 0.3, + ), # 极轴星位置 (x, y) / Polar star position (x, y) "noise_level": 0.05, # 噪声水平 / noise level "atmospheric_turbulence": True, # 大气湍流效果 / atmospheric turbulence effect } diff --git a/ogscope/utils/logging_config.py b/ogscope/utils/logging_config.py index 5d0bb63..ea43515 100644 --- a/ogscope/utils/logging_config.py +++ b/ogscope/utils/logging_config.py @@ -1,6 +1,7 @@ """ 日志配置模块 """ + import sys from pathlib import Path from typing import Optional @@ -14,40 +15,39 @@ def setup_logging( ) -> None: """ 配置 Loguru 日志系统 - + Args: level: 日志级别 (DEBUG, INFO, WARNING, ERROR, CRITICAL) log_file: 日志文件路径,None 表示不输出到文件 """ # 移除默认的 handler / Remove default handler logger.remove() - + # 添加控制台输出 handler / Add console output handler logger.add( sys.stderr, format="{time:YYYY-MM-DD HH:mm:ss} | " - "{level: <8} | " - "{name}:{function}:{line} | " - "{message}", + "{level: <8} | " + "{name}:{function}:{line} | " + "{message}", level=level, colorize=True, ) - + # 添加文件输出 handler(如果指定) / Add file output handler (if specified) if log_file: log_file = Path(log_file) log_file.parent.mkdir(parents=True, exist_ok=True) - + logger.add( log_file, format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | " - "{name}:{function}:{line} | {message}", + "{name}:{function}:{line} | {message}", level=level, rotation="10 MB", # 日志文件大小达到 10MB 时轮转 / Rotate log files when size reaches 10MB retention="30 days", # 保留 30 天的日志 / Keep logs for 30 days compression="zip", # 压缩旧日志 / Compress old logs enqueue=True, # 异步写入 / Asynchronous writing ) - - logger.info(f"日志文件: 
{log_file.absolute()}") + logger.info(f"日志文件: {log_file.absolute()}") diff --git a/ogscope/utils/virtual_stream.py b/ogscope/utils/virtual_stream.py index a4b27a3..8c02029 100644 --- a/ogscope/utils/virtual_stream.py +++ b/ogscope/utils/virtual_stream.py @@ -2,251 +2,290 @@ 虚拟视频流生成器 用于开发环境模拟相机视频流 """ -import io + import math import random import time -from typing import Tuple, Optional -import numpy as np -from PIL import Image, ImageDraw, ImageFont +from typing import Optional + import cv2 +import numpy as np class VirtualVideoStream: """虚拟视频流生成器 / Virtual video stream generator""" - + def __init__(self, width: int = 1920, height: int = 1080, fps: int = 30): self.width = width self.height = height self.fps = fps self.frame_time = 1.0 / fps self.last_frame_time = 0 - + # 模拟参数 / Simulation parameters self.star_field_density = 0.1 self.polar_star_position = (0.5, 0.3) # 极轴星位置 / polar star position self.noise_level = 0.05 self.atmospheric_turbulence = True - + # 生成星点数据 / Generate star point data self.stars = self._generate_star_field() - + # 大气湍流参数 / Atmospheric turbulence parameters self.turbulence_offset = 0 self.turbulence_speed = 0.1 - + # 时间戳 / Timestamp self.start_time = time.time() - + def _generate_star_field(self) -> list: """生成星点数据 / Generate star point data""" stars = [] num_stars = int(self.width * self.height * self.star_field_density / 10000) - + for _ in range(num_stars): # 随机位置 / random location x = random.uniform(0, 1) y = random.uniform(0, 1) - + # 随机星等 (1-6等) / Random magnitude (mag 1-6) magnitude = random.uniform(1.0, 6.0) - + # 根据星等计算亮度 / Calculate brightness based on magnitude brightness = max(0, 1.0 - (magnitude - 1) / 5.0) - + # 星点大小 / Star point size size = max(1, int(3 * brightness)) - - stars.append({ - 'x': x, - 'y': y, - 'magnitude': magnitude, - 'brightness': brightness, - 'size': size, - 'twinkle_phase': random.uniform(0, 2 * math.pi) - }) - + + stars.append( + { + "x": x, + "y": y, + "magnitude": magnitude, + "brightness": brightness, + "size": size, + "twinkle_phase": random.uniform(0, 2 * math.pi), + } + ) + # 添加极轴星(北极星) / Added Polaris (Polaris) - stars.append({ - 'x': self.polar_star_position[0], - 'y': self.polar_star_position[1], - 'magnitude': 2.0, - 'brightness': 0.8, - 'size': 4, - 'twinkle_phase': 0, - 'is_polar_star': True - }) - + stars.append( + { + "x": self.polar_star_position[0], + "y": self.polar_star_position[1], + "magnitude": 2.0, + "brightness": 0.8, + "size": 4, + "twinkle_phase": 0, + "is_polar_star": True, + } + ) + return stars - + def _apply_atmospheric_turbulence(self, image: np.ndarray) -> np.ndarray: """应用大气湍流效果 / Apply atmospheric turbulence effects""" if not self.atmospheric_turbulence: return image - + # 简单的湍流效果:轻微的位置偏移 / Simple turbulence effect: slight position shift self.turbulence_offset += self.turbulence_speed - + # 创建湍流偏移 / Create turbulence offset turbulence_x = int(2 * math.sin(self.turbulence_offset)) turbulence_y = int(1 * math.cos(self.turbulence_offset * 1.3)) - + # 应用偏移 / Apply offset if turbulence_x != 0 or turbulence_y != 0: M = np.float32([[1, 0, turbulence_x], [0, 1, turbulence_y]]) image = cv2.warpAffine(image, M, (self.width, self.height)) - + return image - + def _add_noise(self, image: np.ndarray) -> np.ndarray: """添加噪声 / add noise""" if self.noise_level <= 0: return image - + # 生成随机噪声 / Generate random noise - noise = np.random.normal(0, self.noise_level * 255, image.shape).astype(np.uint8) - + noise = np.random.normal(0, self.noise_level * 255, image.shape).astype( + np.uint8 + ) + # 添加噪声到图像 / Add noise to 
image noisy_image = cv2.add(image, noise) - + return noisy_image - + def _draw_stars(self, image: np.ndarray) -> np.ndarray: """绘制星点 / Draw star points""" current_time = time.time() - + for star in self.stars: # 计算闪烁效果 / Calculate the flicker effect - twinkle = 0.8 + 0.2 * math.sin(star['twinkle_phase'] + current_time * 2) - brightness = star['brightness'] * twinkle - + twinkle = 0.8 + 0.2 * math.sin(star["twinkle_phase"] + current_time * 2) + brightness = star["brightness"] * twinkle + # 计算像素位置 / Calculate pixel position - x = int(star['x'] * self.width) - y = int(star['y'] * self.height) - + x = int(star["x"] * self.width) + y = int(star["y"] * self.height) + # 绘制星点 / Draw star points - size = star['size'] + size = star["size"] color = int(255 * brightness) - + # 绘制星点(圆形) / Draw star points (circles) cv2.circle(image, (x, y), size, (color, color, color), -1) - + # 为亮星添加十字光芒 / Add cross rays to bright stars if brightness > 0.7: # 水平线 / horizontal line - cv2.line(image, (x - size*2, y), (x + size*2, y), (color, color, color), 1) + cv2.line( + image, + (x - size * 2, y), + (x + size * 2, y), + (color, color, color), + 1, + ) # 垂直线 / vertical line - cv2.line(image, (x, y - size*2), (x, y + size*2), (color, color, color), 1) - + cv2.line( + image, + (x, y - size * 2), + (x, y + size * 2), + (color, color, color), + 1, + ) + # 为极轴星添加特殊标记 / Add special markers to polar stars - if star.get('is_polar_star'): + if star.get("is_polar_star"): # 绘制极轴星标记 / Draw polar star markers - cv2.circle(image, (x, y), size + 2, (0, 255, 255), 2) # 黄色圆圈 / yellow circle + cv2.circle( + image, (x, y), size + 2, (0, 255, 255), 2 + ) # 黄色圆圈 / yellow circle # 添加文字标记 / Add text tag - cv2.putText(image, "Polaris", (x + 10, y - 10), - cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1) - + cv2.putText( + image, + "Polaris", + (x + 10, y - 10), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, + (0, 255, 255), + 1, + ) + return image - + def _draw_crosshair(self, image: np.ndarray) -> np.ndarray: """绘制十字准星 / draw crosshair""" center_x = self.width // 2 center_y = self.height // 2 - + # 绘制十字准星 / draw crosshair color = (255, 0, 0) # 红色 / red thickness = 2 - + # 水平线 / horizontal line - cv2.line(image, (center_x - 20, center_y), (center_x + 20, center_y), color, thickness) + cv2.line( + image, + (center_x - 20, center_y), + (center_x + 20, center_y), + color, + thickness, + ) # 垂直线 / vertical line - cv2.line(image, (center_x, center_y - 20), (center_x, center_y + 20), color, thickness) - + cv2.line( + image, + (center_x, center_y - 20), + (center_x, center_y + 20), + color, + thickness, + ) + # 中心圆 / central circle cv2.circle(image, (center_x, center_y), 8, color, thickness) - + return image - + def _draw_coordinate_grid(self, image: np.ndarray) -> np.ndarray: """绘制坐标网格 / Draw coordinate grid""" color = (64, 64, 64) # 深灰色 / dark gray thickness = 1 - + # 绘制网格线 / Draw grid lines for i in range(0, self.width, self.width // 10): cv2.line(image, (i, 0), (i, self.height), color, thickness) - + for i in range(0, self.height, self.height // 10): cv2.line(image, (0, i), (self.width, i), color, thickness) - + return image - + def generate_frame(self) -> bytes: """生成一帧图像 / generate a frame of image""" current_time = time.time() - + # 控制帧率 / 控制帧率 if current_time - self.last_frame_time < self.frame_time: time.sleep(self.frame_time - (current_time - self.last_frame_time)) - + self.last_frame_time = time.time() - + # 创建黑色背景 / Create a black background image = np.zeros((self.height, self.width, 3), dtype=np.uint8) - + # 绘制坐标网格 / Draw coordinate grid image = 
self._draw_coordinate_grid(image) - + # 绘制星点 / Draw star points image = self._draw_stars(image) - + # 绘制十字准星 / draw crosshair image = self._draw_crosshair(image) - + # 应用大气湍流 / Apply atmospheric turbulence image = self._apply_atmospheric_turbulence(image) - + # 添加噪声 / add noise image = self._add_noise(image) - + # 转换为JPEG / Convert to JPEG - _, buffer = cv2.imencode('.jpg', image, [cv2.IMWRITE_JPEG_QUALITY, 85]) - + _, buffer = cv2.imencode(".jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85]) + return buffer.tobytes() - + def get_star_positions(self) -> list: """获取当前星点位置(用于校准) / Get the current star point position (for calibration)""" return [ { - 'x': star['x'] * self.width, - 'y': star['y'] * self.height, - 'magnitude': star['magnitude'], - 'name': 'Polaris' if star.get('is_polar_star') else f'Star_{i}' + "x": star["x"] * self.width, + "y": star["y"] * self.height, + "magnitude": star["magnitude"], + "name": "Polaris" if star.get("is_polar_star") else f"Star_{i}", } for i, star in enumerate(self.stars) ] - + def update_polar_star_position(self, x: float, y: float): """更新极轴星位置 / Update polar star position""" self.polar_star_position = (x, y) # 更新极轴星位置 / Update polar star position for star in self.stars: - if star.get('is_polar_star'): - star['x'] = x - star['y'] = y + if star.get("is_polar_star"): + star["x"] = x + star["y"] = y break - + def set_simulation_parameters(self, **kwargs): """设置模拟参数 / Set simulation parameters""" - if 'star_field_density' in kwargs: - self.star_field_density = kwargs['star_field_density'] + if "star_field_density" in kwargs: + self.star_field_density = kwargs["star_field_density"] self.stars = self._generate_star_field() - - if 'noise_level' in kwargs: - self.noise_level = kwargs['noise_level'] - - if 'atmospheric_turbulence' in kwargs: - self.atmospheric_turbulence = kwargs['atmospheric_turbulence'] + + if "noise_level" in kwargs: + self.noise_level = kwargs["noise_level"] + + if "atmospheric_turbulence" in kwargs: + self.atmospheric_turbulence = kwargs["atmospheric_turbulence"] # 全局虚拟视频流实例 / Global virtual video stream instance @@ -261,6 +300,8 @@ def get_virtual_stream() -> VirtualVideoStream: return _virtual_stream -def create_virtual_stream(width: int = 1920, height: int = 1080, fps: int = 30) -> VirtualVideoStream: +def create_virtual_stream( + width: int = 1920, height: int = 1080, fps: int = 30 +) -> VirtualVideoStream: """创建新的虚拟视频流实例 / Create a new virtual video stream instance""" return VirtualVideoStream(width, height, fps) diff --git a/ogscope/vendor/README.md b/ogscope/vendor/README.md new file mode 100644 index 0000000..4630c16 --- /dev/null +++ b/ogscope/vendor/README.md @@ -0,0 +1,8 @@ +# Vendored cedar-solve (`tetra3`) + +本目录为 **[cedar-solve](https://github.com/smroid/cedar-solve)** 仓库中的 `tetra3` 包完整拷贝(与 PyPI [`cedar-solve`](https://pypi.org/project/cedar-solve/) 同源),便于离线部署与锁定版本;**非自研解算算法**。 + +This folder is the upstream **`tetra3`** package from cedar-solve (same family as PyPI `cedar-solve`), vendored for offline boards — **not a reimplementation**. + +- 许可证 / License: [tetra3/LICENSE.txt](tetra3/LICENSE.txt)(Apache-2.0) +- `tetra3/data/default_database.npz` 体积大,不随 Git 提交;请从 cedar-solve 源码包复制到 `data/plate_solve/` 或 `tetra3/data/` / Large `default_database.npz` is not committed; copy from cedar-solve release or your `cedar-solve-master/tetra3/data/`. 
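+
+## 最小用法示意 / Minimal usage sketch
+
+A minimal, unverified sketch (not upstream documentation). It assumes the pattern database has been copied to `data/plate_solve/default_database.npz`, that `load_database` accepts a filesystem path, and that the vendored package resolves as `ogscope.vendor.tetra3`; the `Tetra3(load_database=...)` and `solve_from_centroids(...)` calls mirror `benchmark_synthetic_fovs.py` in this folder.
+
+```python
+from pathlib import Path
+
+from ogscope.vendor.tetra3 import Tetra3
+
+# Load the vendored solver with the locally provisioned pattern database.
+t3 = Tetra3(load_database=Path("data/plate_solve/default_database.npz"))
+
+# Centroids are (y, x) pixel positions, brightest first. In practice pass the
+# ~20 brightest detections; three points are shown here only for shape.
+centroids = [(540.2, 960.7), (310.5, 1200.1), (805.9, 450.3)]
+
+# The FOV estimate (degrees) is illustrative; match it to the actual optics.
+solution = t3.solve_from_centroids(centroids, size=(1080, 1920), fov_estimate=16.0)
+print(solution["RA"], solution["Dec"], solution["T_solve"])
+```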
diff --git a/ogscope/vendor/tetra3/LICENSE.txt b/ogscope/vendor/tetra3/LICENSE.txt new file mode 100644 index 0000000..a4a2080 --- /dev/null +++ b/ogscope/vendor/tetra3/LICENSE.txt @@ -0,0 +1,199 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +----------------------------------------------------------------------------- + +Original license notice for Tetra, of which tetra3 is a derivative: +Copyright (c) 2016 brownj4 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/ogscope/vendor/tetra3/__init__.py b/ogscope/vendor/tetra3/__init__.py new file mode 100644 index 0000000..6f0615e --- /dev/null +++ b/ogscope/vendor/tetra3/__init__.py @@ -0,0 +1,5 @@ +name = "tetra3" + +from .tetra3 import Tetra3, get_centroids_from_image, crop_and_downsample_image + +__all__ = ['Tetra3', 'get_centroids_from_image', 'crop_and_downsample_image'] diff --git a/ogscope/vendor/tetra3/benchmark_synthetic_fovs.py b/ogscope/vendor/tetra3/benchmark_synthetic_fovs.py new file mode 100644 index 0000000..8d2fccd --- /dev/null +++ b/ogscope/vendor/tetra3/benchmark_synthetic_fovs.py @@ -0,0 +1,155 @@ +# Copyright (c) 2024 Steven Rosenthal smr@dt3.org +# See LICENSE file in root directory for license terms. + +import math + +import numpy as np +import pytest +from scipy.spatial.transform import Rotation as R + +import tetra3 +from tetra3 import fov_util + +""" +Test utility to enumerate test FOVs from a star catalog and evaluate +Cedar's performance solving them. Adapted from code provided by Iain Clark. + +Note: Angle values are in radians unless suffixed with _deg. +""" + +def _ra_dec_from_vector(vec): + """Returns (ra, dec) from the given (x, y, z) star vector.""" + x, y, z = vec + ra = math.atan2(y, x) + dec = math.asin(z) + return (ra, dec) + + +def benchmark_synthetic_fovs(width, height, fov_deg, num_fovs, + num_centroids=20, database='default_database'): + """Synthesizes and solves star fields. + width, height: pixel count of camera + fov_deg: horizontal FOV, in degrees + num_fovs: Number of FOVs to generate. 2n + 1 FOVs are actually generated. + 0 generates a single FOV; 1 generates 3 FOVs, etc. + num_centroids: max number of centroids to pass to solver. + + Returns: dict with the following fields: + num_successes + num_failures + mean_solve_time_ms + max_solve_time_ms + solve_time_histo + histo_bin_width_ms + """ + + # TODO: apply noise to x/y centroids; apply noise to brightness ranking. + + diag_pixels = math.sqrt(width * width + height * height) + diag_fov = np.deg2rad(fov_deg * diag_pixels / width) + scale_factor = width / 2 / np.tan(np.deg2rad(fov_deg) / 2) + + # Histogram of successful solve times. 
+ NUM_HISTO_BINS = 1000 + MAX_SOLVE_TIME_MS = 1000 + solve_time_histo = [0] * NUM_HISTO_BINS + bin_width = MAX_SOLVE_TIME_MS / NUM_HISTO_BINS + + total_solve_time_ms = 0 + max_solve_time_ms = 0 + num_successes = 0 + num_failures = 0 + + t3 = tetra3.Tetra3(load_database=database) + + print('Start solving...') + iter_count = 0 + for center_vec in fov_util.fibonacci_sphere_lattice(num_fovs): + iter_count += 1 + + ra, dec = _ra_dec_from_vector(center_vec) + if ra < 0: + ra += 2 * np.pi + + nearby_star_inds = t3._get_nearby_catalog_stars(center_vec, diag_fov / 2) + nearby_stars = t3.star_table[nearby_star_inds] + + nearby_ra = nearby_stars.transpose()[0] + nearby_dec = nearby_stars.transpose()[1] + + # un-rotate RA + nearby_ra_rot = nearby_ra - ra + + # convert rotated to cartesian + proj_xyz = np.zeros([3, nearby_ra.shape[0]]) + proj_xyz[0] = np.cos(nearby_ra_rot) * np.cos(nearby_dec) # x + proj_xyz[1] = np.sin(nearby_ra_rot) * np.cos(nearby_dec) # y + proj_xyz[2] = np.sin(nearby_dec) # z + + # rotate to remove dec of target star + # rotate from xy plane parallel to xz plane to +ve Z to zero declination + r = R.from_rotvec([0, (-np.pi / 2 + dec), 0]) + proj_xyz = r.apply(proj_xyz.transpose()).transpose() + + # project stars on z=1 plane perpendicular to boresight + proj_xyz[0] = proj_xyz[0] / proj_xyz[2] + proj_xyz[1] = proj_xyz[1] / proj_xyz[2] + + # scale to image pixels + proj_xyz_scaled = proj_xyz * scale_factor + proj_xyz_scaled[0] = proj_xyz_scaled[0] + width / 2 + proj_xyz_scaled[1] = proj_xyz_scaled[1] + height / 2 + + centroids = [] + for index in range(len(proj_xyz_scaled[0])): + x = proj_xyz_scaled[0][index] + y = proj_xyz_scaled[1][index] + # Only keep centroids within the image area. Add a small border, reflects + # that Cedar-Detect cannot detect at edge. + if x < 2 or y < 2 or x >= width - 2 or y >= height - 2: + continue + centroids.append((y, x)) + if len(centroids) >= num_centroids: + break # Keep only num_centroids brightest centroids. + + solution = t3.solve_from_centroids(centroids, size=(height, width), distortion=0, + fov_estimate=fov_deg, fov_max_error=fov_deg/10.0) + # Print progress 10 times. + if iter_count % (num_fovs / 5) == 0: + print(f'iter {iter_count}; solution for ra/dec {np.rad2deg(ra):.4f}/{np.rad2deg(dec):.4f}: {solution}') + + if solution['RA'] is None: + num_failures += 1 + continue + + num_successes += 1 + + tol = 0.05 + # We don't handle proper motion very close to the poles, so use a larger tolerance. 
+ if abs(np.rad2deg(dec)) > (90 - fov_deg/2): + tol = 0.5 + ra_diff = np.rad2deg(ra) - solution['RA'] + if ra_diff > 180: + ra_diff -= 360 + if ra_diff < -180: + ra_diff += 360 + if abs(ra_diff) > tol: + pytest.fail(f"'expected RA {np.rad2deg(ra)}, got {solution['RA']} (dec {solution['Dec']})'") + if abs(np.rad2deg(dec) - solution['Dec']) > tol: + pytest.fail(f"expected Dec {np.rad2deg(dec)}, got {solution['Dec']}") + + total_solve_time_ms += solution['T_solve'] + time_ms = int(solution['T_solve']) + max_solve_time_ms = max(time_ms, max_solve_time_ms) + histo_bin = int(time_ms / bin_width) + if histo_bin >= len(solve_time_histo): + histo_bin = len(solve_time_histo) - 1 + solve_time_histo[histo_bin] += 1 + + return {'num_successes': num_successes, + 'num_failures': num_failures, + 'mean_solve_time_ms': total_solve_time_ms / num_successes, + 'max_solve_time_ms': max_solve_time_ms, + 'solve_time_histo': solve_time_histo, + 'histo_bin_width_ms': bin_width + } diff --git a/ogscope/vendor/tetra3/bin/README.txt b/ogscope/vendor/tetra3/bin/README.txt new file mode 100644 index 0000000..f685f64 --- /dev/null +++ b/ogscope/vendor/tetra3/bin/README.txt @@ -0,0 +1,6 @@ +This directory should be populated with an executable named +'cedar-detect-server'. This binary is a gRPC server that implements the +CedarDetect service declared at ../proto/cedar_detect.proto. + +The binary should be built from the Rust source at +https://github.com/smroid/cedar-detect. diff --git a/ogscope/vendor/tetra3/breadth_first_combinations.py b/ogscope/vendor/tetra3/breadth_first_combinations.py new file mode 100644 index 0000000..0c950f3 --- /dev/null +++ b/ogscope/vendor/tetra3/breadth_first_combinations.py @@ -0,0 +1,18 @@ +# Copyright (c) 2024 Steven Rosenthal smr@dt3.org +# See LICENSE file in root directory for license terms. + +# Developed by smr@dt3.org; please let them know if this already exists somewhere. + +def breadth_first_combinations(sequence, r): + """ Variant of itertools.combinations() that is breadth-first rather than depth-first. """ + if r == 1: + for item in sequence: + yield (item,) + return + + index = r - 1 + while index < len(sequence): + right_most_elt = sequence[index] + for prefix_combination in breadth_first_combinations(sequence[:index], r-1): + yield prefix_combination + (right_most_elt,) + index += 1 diff --git a/ogscope/vendor/tetra3/cedar_detect_client.py b/ogscope/vendor/tetra3/cedar_detect_client.py new file mode 100644 index 0000000..c0caf0f --- /dev/null +++ b/ogscope/vendor/tetra3/cedar_detect_client.py @@ -0,0 +1,182 @@ +# Copyright (c) 2024 Steven Rosenthal smr@dt3.org +# See LICENSE file in root directory for license terms. + +from __future__ import annotations +import logging +import os +import subprocess +import time +from pathlib import Path +from typing import Union + +import grpc +from multiprocessing import shared_memory +import numpy as np + +from tetra3 import cedar_detect_pb2, cedar_detect_pb2_grpc + +_bin_dir = Path(__file__).parent / "bin" + + +class CedarDetectClient: + """Executes the cedar-detect-server binary as a subprocess. That binary is a + gRPC server described by the tetra3/proto/cedar_detect.proto file. + """ + + def __init__(self, logger = None, binary_path: Union[Path, str, None] = None, port=50051): + """Spawns the cedar-detect-server subprocess. + + Args: + logger: If have a logger object, pass it in here. Otherwise one will be created + locally. 
+ binary_path: If you wish to specify a custom location for the `cedar-detect-server` binary you + may do so, otherwise the default is to search in the relative directory "./bin" + port: Customize the `cedar-detect-server` port if running multiple instances. + """ + if logger is None: + self._logger = logging.getLogger('CedarDetectClient') + # Add new handlers to the logger. + self._logger.setLevel(logging.DEBUG) + # Console handler at INFO level + ch = logging.StreamHandler() + ch.setLevel(logging.INFO) + ch.setFormatter( + logging.Formatter('%(asctime)s:%(name)s-%(levelname)s: %(message)s')) + self._logger.addHandler(ch) + else: + self._logger = logger + self._binary_path: Path = Path(binary_path) if binary_path else _bin_dir / "cedar-detect-server" + if not self._binary_path.exists() or not self._binary_path.is_file(): + raise ValueError(f"The cedar-detect-server binary could not be found at '{self._binary_path}'.") + self._port = port + + my_env = os.environ.copy() + my_env["RUST_BACKTRACE"] = "1" + self._subprocess = subprocess.Popen([self._binary_path, '--port', str(self._port)], + env=my_env) + # Will initialize on first use. + self._stub = None + self._shmem = None + self._shmem_size = 0 + # Try shared memory, fall back if an error occurs. + self._use_shmem = True + + def __del__(self): + self._subprocess.kill() + self._del_shmem() + + def _get_stub(self): + if self._stub is None: + channel = grpc.insecure_channel('localhost:%d' % self._port) + self._stub = cedar_detect_pb2_grpc.CedarDetectStub(channel) + return self._stub + + # Returns True if the shared memory file was re-created with a new size. + def _alloc_shmem(self, size): + resized = False + if self._shmem is not None and size > self._shmem_size: + self._shmem.close() + self._shmem.unlink() + self._shmem = None + resized = True + if self._shmem is None: + self._shmem = shared_memory.SharedMemory( + "/cedar_detect_image", create=True, size=size) + self._shmem_size = size + return resized + + def _del_shmem(self): + if self._shmem is not None: + self._shmem.close() + self._shmem.unlink() + self._shmem = None + + def extract_centroids(self, image, sigma, use_binned, binning=None, + detect_hot_pixels=True, normalize_rows=True): + """Invokes the CedarDetect.ExtractCentroids() RPC. Returns [(y,x)] of the + detected star centroids. + """ + np_image = np.asarray(image, dtype=np.uint8) + (height, width) = np_image.shape + + centroids_result = None + im = None + rpc_exception = None + retried = False + while True: + if rpc_exception is not None: + # See if subprocess exited. If so, we restart it and retry once. + returncode = self._subprocess.poll() + if returncode is None: + # Subprocess still there; just propagate the exception. + raise rpc_exception + self._logger.error('Subprocess exit code: %s' % returncode) + if retried: + # We already retried once, bail. + raise rpc_exception + retried = True + rpc_exception = None + self._logger.error('Creating new subprocess') + self._subprocess = subprocess.Popen( + [self._binary_path, '--port', str(self._port)]) + self._stub = None + + if self._use_shmem: + # Use shared memory to make the gRPC calls faster. This works only + # when the client (this program) and the CedarDetect gRPC server are + # running on the same machine. + + # The image data is passed in a shared memory object, with the gRPC + # request giving the name of the shared memory object. + resized = self._alloc_shmem(size=width*height) + # Create numpy array backed by shmem. 
+ shimg = np.ndarray(np_image.shape, dtype=np_image.dtype, buffer=self._shmem.buf) + # Copy np_image into shimg. This is much cheaper than passing image + # over the gRPC call. + shimg[:] = np_image[:] + + im = cedar_detect_pb2.Image(width=width, height=height, + shmem_name=self._shmem.name, reopen_shmem=resized) + req = cedar_detect_pb2.CentroidsRequest( + input_image=im, sigma=sigma, return_binned=False, + binning=binning, use_binned_for_star_candidates=use_binned, + detect_hot_pixels=detect_hot_pixels, normalize_rows=normalize_rows) + try: + centroids_result = self._get_stub().ExtractCentroids(req, + wait_for_ready=True, + timeout=2) + break # Succeeded, break out of retry loop. + except grpc.RpcError as err: + if err.code() == grpc.StatusCode.INTERNAL: + self._logger.warning('RPC (with shmem) failed with: %s' % err.details()) + self._del_shmem() + self._use_shmem = False + self._logger.info('No longer using shared memory for CentroidsRequest() calls') + # Fall through to non-shmem path. + else: + self._logger.error('RPC (with shmem) failed with: %s' % err.details()) + rpc_exception = err + continue # Loop to retry logic. + + if not self._use_shmem: + # Not using shared memory. The image data is passed as part of the + # gRPC request. + im = cedar_detect_pb2.Image(width=width, height=height, + image_data=np_image.tobytes()) + req = cedar_detect_pb2.CentroidsRequest( + input_image=im, sigma=sigma, return_binned=False, + binning=binning, use_binned_for_star_candidates=use_binned, + detect_hot_pixels=detect_hot_pixels, normalize_rows=normalize_rows) + try: + centroids_result = self._get_stub().ExtractCentroids(req) + break # Succeeded, break out of retry loop. + except grpc.RpcError as err: + self._logger.error('RPC failed with: %s' % err.details()) + rpc_exception = err # Loop to retry logic. + # while True + + tetra_centroids = [] # List of (y, x). + if centroids_result is not None: + for sc in centroids_result.star_candidates: + tetra_centroids.append((sc.centroid_position.y, sc.centroid_position.x)) + return tetra_centroids diff --git a/ogscope/vendor/tetra3/cedar_detect_pb2.py b/ogscope/vendor/tetra3/cedar_detect_pb2.py new file mode 100644 index 0000000..6d7bdc4 --- /dev/null +++ b/ogscope/vendor/tetra3/cedar_detect_pb2.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: tetra3/cedar_detect.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'tetra3/cedar_detect.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19tetra3/cedar_detect.proto\x12\x0c\x63\x65\x64\x61r_detect\x1a\x1egoogle/protobuf/duration.proto\"\xd6\x02\n\x10\x43\x65ntroidsRequest\x12(\n\x0binput_image\x18\x01 \x01(\x0b\x32\x13.cedar_detect.Image\x12\r\n\x05sigma\x18\x02 \x01(\x01\x12\x14\n\x08max_size\x18\x03 \x01(\x05\x42\x02\x18\x01\x12\x14\n\x07\x62inning\x18\x08 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\rreturn_binned\x18\x04 \x01(\x08\x12&\n\x1euse_binned_for_star_candidates\x18\x05 \x01(\x08\x12\x19\n\x11\x64\x65tect_hot_pixels\x18\x06 \x01(\x08\x12\x16\n\x0enormalize_rows\x18\t \x01(\x08\x12@\n\x1a\x65stimate_background_region\x18\x07 \x01(\x0b\x32\x17.cedar_detect.RectangleH\x01\x88\x01\x01\x42\n\n\x08_binningB\x1d\n\x1b_estimate_background_region\"N\n\tRectangle\x12\x10\n\x08origin_x\x18\x01 \x01(\x05\x12\x10\n\x08origin_y\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0e\n\x06height\x18\x04 \x01(\x05\"\xbe\x02\n\x0f\x43\x65ntroidsResult\x12\x16\n\x0enoise_estimate\x18\x01 \x01(\x01\x12 \n\x13\x62\x61\x63kground_estimate\x18\x07 \x01(\x01H\x00\x88\x01\x01\x12\x17\n\x0fhot_pixel_count\x18\x02 \x01(\x05\x12\x17\n\x0fpeak_star_pixel\x18\x06 \x01(\x05\x12\x33\n\x0fstar_candidates\x18\x03 \x03(\x0b\x32\x1a.cedar_detect.StarCentroid\x12.\n\x0c\x62inned_image\x18\x04 \x01(\x0b\x32\x13.cedar_detect.ImageH\x01\x88\x01\x01\x12\x31\n\x0e\x61lgorithm_time\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationB\x16\n\x14_background_estimateB\x0f\n\r_binned_image\"x\n\x05Image\x12\r\n\x05width\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\x12\n\nimage_data\x18\x03 \x01(\x0c\x12\x17\n\nshmem_name\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x0creopen_shmem\x18\x05 \x01(\x08\x42\r\n\x0b_shmem_name\"n\n\x0cStarCentroid\x12\x33\n\x11\x63\x65ntroid_position\x18\x01 \x01(\x0b\x32\x18.cedar_detect.ImageCoord\x12\x12\n\nbrightness\x18\x04 \x01(\x01\x12\x15\n\rnum_saturated\x18\x06 \x01(\x05\"\"\n\nImageCoord\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x32`\n\x0b\x43\x65\x64\x61rDetect\x12Q\n\x10\x45xtractCentroids\x12\x1e.cedar_detect.CentroidsRequest\x1a\x1d.cedar_detect.CentroidsResultb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tetra3.cedar_detect_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_CENTROIDSREQUEST'].fields_by_name['max_size']._loaded_options = None + _globals['_CENTROIDSREQUEST'].fields_by_name['max_size']._serialized_options = b'\030\001' + _globals['_CENTROIDSREQUEST']._serialized_start=76 + _globals['_CENTROIDSREQUEST']._serialized_end=418 + _globals['_RECTANGLE']._serialized_start=420 + 
_globals['_RECTANGLE']._serialized_end=498 + _globals['_CENTROIDSRESULT']._serialized_start=501 + _globals['_CENTROIDSRESULT']._serialized_end=819 + _globals['_IMAGE']._serialized_start=821 + _globals['_IMAGE']._serialized_end=941 + _globals['_STARCENTROID']._serialized_start=943 + _globals['_STARCENTROID']._serialized_end=1053 + _globals['_IMAGECOORD']._serialized_start=1055 + _globals['_IMAGECOORD']._serialized_end=1089 + _globals['_CEDARDETECT']._serialized_start=1091 + _globals['_CEDARDETECT']._serialized_end=1187 +# @@protoc_insertion_point(module_scope) diff --git a/ogscope/vendor/tetra3/cedar_detect_pb2.pyi b/ogscope/vendor/tetra3/cedar_detect_pb2.pyi new file mode 100644 index 0000000..b1b7ed0 --- /dev/null +++ b/ogscope/vendor/tetra3/cedar_detect_pb2.pyi @@ -0,0 +1,91 @@ +from google.protobuf import duration_pb2 as _duration_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class CentroidsRequest(_message.Message): + __slots__ = ("input_image", "sigma", "max_size", "binning", "return_binned", "use_binned_for_star_candidates", "detect_hot_pixels", "normalize_rows", "estimate_background_region") + INPUT_IMAGE_FIELD_NUMBER: _ClassVar[int] + SIGMA_FIELD_NUMBER: _ClassVar[int] + MAX_SIZE_FIELD_NUMBER: _ClassVar[int] + BINNING_FIELD_NUMBER: _ClassVar[int] + RETURN_BINNED_FIELD_NUMBER: _ClassVar[int] + USE_BINNED_FOR_STAR_CANDIDATES_FIELD_NUMBER: _ClassVar[int] + DETECT_HOT_PIXELS_FIELD_NUMBER: _ClassVar[int] + NORMALIZE_ROWS_FIELD_NUMBER: _ClassVar[int] + ESTIMATE_BACKGROUND_REGION_FIELD_NUMBER: _ClassVar[int] + input_image: Image + sigma: float + max_size: int + binning: int + return_binned: bool + use_binned_for_star_candidates: bool + detect_hot_pixels: bool + normalize_rows: bool + estimate_background_region: Rectangle + def __init__(self, input_image: _Optional[_Union[Image, _Mapping]] = ..., sigma: _Optional[float] = ..., max_size: _Optional[int] = ..., binning: _Optional[int] = ..., return_binned: bool = ..., use_binned_for_star_candidates: bool = ..., detect_hot_pixels: bool = ..., normalize_rows: bool = ..., estimate_background_region: _Optional[_Union[Rectangle, _Mapping]] = ...) -> None: ... + +class Rectangle(_message.Message): + __slots__ = ("origin_x", "origin_y", "width", "height") + ORIGIN_X_FIELD_NUMBER: _ClassVar[int] + ORIGIN_Y_FIELD_NUMBER: _ClassVar[int] + WIDTH_FIELD_NUMBER: _ClassVar[int] + HEIGHT_FIELD_NUMBER: _ClassVar[int] + origin_x: int + origin_y: int + width: int + height: int + def __init__(self, origin_x: _Optional[int] = ..., origin_y: _Optional[int] = ..., width: _Optional[int] = ..., height: _Optional[int] = ...) -> None: ... 
+ +class CentroidsResult(_message.Message): + __slots__ = ("noise_estimate", "background_estimate", "hot_pixel_count", "peak_star_pixel", "star_candidates", "binned_image", "algorithm_time") + NOISE_ESTIMATE_FIELD_NUMBER: _ClassVar[int] + BACKGROUND_ESTIMATE_FIELD_NUMBER: _ClassVar[int] + HOT_PIXEL_COUNT_FIELD_NUMBER: _ClassVar[int] + PEAK_STAR_PIXEL_FIELD_NUMBER: _ClassVar[int] + STAR_CANDIDATES_FIELD_NUMBER: _ClassVar[int] + BINNED_IMAGE_FIELD_NUMBER: _ClassVar[int] + ALGORITHM_TIME_FIELD_NUMBER: _ClassVar[int] + noise_estimate: float + background_estimate: float + hot_pixel_count: int + peak_star_pixel: int + star_candidates: _containers.RepeatedCompositeFieldContainer[StarCentroid] + binned_image: Image + algorithm_time: _duration_pb2.Duration + def __init__(self, noise_estimate: _Optional[float] = ..., background_estimate: _Optional[float] = ..., hot_pixel_count: _Optional[int] = ..., peak_star_pixel: _Optional[int] = ..., star_candidates: _Optional[_Iterable[_Union[StarCentroid, _Mapping]]] = ..., binned_image: _Optional[_Union[Image, _Mapping]] = ..., algorithm_time: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ...) -> None: ... + +class Image(_message.Message): + __slots__ = ("width", "height", "image_data", "shmem_name", "reopen_shmem") + WIDTH_FIELD_NUMBER: _ClassVar[int] + HEIGHT_FIELD_NUMBER: _ClassVar[int] + IMAGE_DATA_FIELD_NUMBER: _ClassVar[int] + SHMEM_NAME_FIELD_NUMBER: _ClassVar[int] + REOPEN_SHMEM_FIELD_NUMBER: _ClassVar[int] + width: int + height: int + image_data: bytes + shmem_name: str + reopen_shmem: bool + def __init__(self, width: _Optional[int] = ..., height: _Optional[int] = ..., image_data: _Optional[bytes] = ..., shmem_name: _Optional[str] = ..., reopen_shmem: bool = ...) -> None: ... + +class StarCentroid(_message.Message): + __slots__ = ("centroid_position", "brightness", "num_saturated") + CENTROID_POSITION_FIELD_NUMBER: _ClassVar[int] + BRIGHTNESS_FIELD_NUMBER: _ClassVar[int] + NUM_SATURATED_FIELD_NUMBER: _ClassVar[int] + centroid_position: ImageCoord + brightness: float + num_saturated: int + def __init__(self, centroid_position: _Optional[_Union[ImageCoord, _Mapping]] = ..., brightness: _Optional[float] = ..., num_saturated: _Optional[int] = ...) -> None: ... + +class ImageCoord(_message.Message): + __slots__ = ("x", "y") + X_FIELD_NUMBER: _ClassVar[int] + Y_FIELD_NUMBER: _ClassVar[int] + x: float + y: float + def __init__(self, x: _Optional[float] = ..., y: _Optional[float] = ...) -> None: ... diff --git a/ogscope/vendor/tetra3/cedar_detect_pb2_grpc.py b/ogscope/vendor/tetra3/cedar_detect_pb2_grpc.py new file mode 100644 index 0000000..38dbc6c --- /dev/null +++ b/ogscope/vendor/tetra3/cedar_detect_pb2_grpc.py @@ -0,0 +1,98 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +from tetra3 import cedar_detect_pb2 as tetra3_dot_cedar__detect__pb2 + +GRPC_GENERATED_VERSION = '1.71.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in tetra3/cedar_detect_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' 
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class CedarDetectStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.ExtractCentroids = channel.unary_unary( + '/cedar_detect.CedarDetect/ExtractCentroids', + request_serializer=tetra3_dot_cedar__detect__pb2.CentroidsRequest.SerializeToString, + response_deserializer=tetra3_dot_cedar__detect__pb2.CentroidsResult.FromString, + _registered_method=True) + + +class CedarDetectServicer(object): + """Missing associated documentation comment in .proto file.""" + + def ExtractCentroids(self, request, context): + """Returns INTERNAL error if the Image request's shared memory cannot be accessed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_CedarDetectServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ExtractCentroids': grpc.unary_unary_rpc_method_handler( + servicer.ExtractCentroids, + request_deserializer=tetra3_dot_cedar__detect__pb2.CentroidsRequest.FromString, + response_serializer=tetra3_dot_cedar__detect__pb2.CentroidsResult.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'cedar_detect.CedarDetect', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('cedar_detect.CedarDetect', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class CedarDetect(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def ExtractCentroids(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/cedar_detect.CedarDetect/ExtractCentroids', + tetra3_dot_cedar__detect__pb2.CentroidsRequest.SerializeToString, + tetra3_dot_cedar__detect__pb2.CentroidsResult.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/ogscope/vendor/tetra3/cli/__init__.py b/ogscope/vendor/tetra3/cli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ogscope/vendor/tetra3/cli/benchmark_synthetic_fovs.py b/ogscope/vendor/tetra3/cli/benchmark_synthetic_fovs.py new file mode 100644 index 0000000..3f5fb68 --- /dev/null +++ b/ogscope/vendor/tetra3/cli/benchmark_synthetic_fovs.py @@ -0,0 +1,65 @@ +""" +Enumerate test FOVs from a star catalog and evaluate Cedar's performance +solving them. 
+ +Example: + python benchmark_synthetic_fovs.py --width 1280 --height 960 --fov_deg 12 --num_fovs 1000 +""" +import argparse + +from tetra3 import benchmark_synthetic_fovs +from pathlib import Path +from typing import List + +def _print_histo_bin(solve_time_histo: List[float], bin_width: float): + for histo_bin, val in enumerate(solve_time_histo): + if val == 0: + continue + + hv = histo_bin * bin_width + if histo_bin < len(solve_time_histo) - 1: + print(f'{hv}-{(histo_bin + 1) * bin_width}ms: {val}') + else: + print(f'>= {hv}ms: {val}') + + +def main(): + parser = argparse.ArgumentParser(description="Synthesize FOVs and test Cedar-solve") + + # required flags + parser.add_argument("--width", type=int, required=True, + help="Width (in pixels) of image sensor.") + parser.add_argument("--height", type=int, required=True, + help="Height (in pixels) of image sensor.") + parser.add_argument("--fov_deg", type=float, required=True, + help="Horizontal field of view (in degrees) of image.") + parser.add_argument("--num_fovs", type=int, required=True, + help="Number of FOVs to synthesize (2N + 1 actually generated).") + + # optional flags + parser.add_argument("--num_centroids", type=int, default=20, + help="Maximum number of centroids to pass to solver.") + parser.add_argument("--database", type=Path, default='default_database', + help="Pattern database to load.") + + args = parser.parse_args() + + result = benchmark_synthetic_fovs.benchmark_synthetic_fovs( + args.width, args.height, args.fov_deg, args.num_fovs, args.num_centroids, + database=args.database) + + num_failures = result['num_failures'] + num_successes = result['num_successes'] + mean_solve_time_ms = result['mean_solve_time_ms'] + max_solve_time_ms = result['max_solve_time_ms'] + print( + 'Results - ' + f'num_failures: {num_failures} ' + f'mean_solve_time_ms: {mean_solve_time_ms:.1f} ' + f'max_solve_time_ms: {max_solve_time_ms}' + ) + _print_histo_bin(result['solve_time_histo'], result['histo_bin_width_ms']) + + +if __name__ == "__main__": + main() diff --git a/ogscope/vendor/tetra3/cli/generate_database.py b/ogscope/vendor/tetra3/cli/generate_database.py new file mode 100644 index 0000000..3851540 --- /dev/null +++ b/ogscope/vendor/tetra3/cli/generate_database.py @@ -0,0 +1,95 @@ +""" +Generate a database file from a star-catalog. 
+Provide any argument from Tetra3.generate_database() + +Example: + tetra3-gen-db --max-fov 30 path/to/database/tyc_main path/to/target.npz +""" +import argparse +from pathlib import Path +from typing import Callable, Tuple, Union + +import tetra3 + + +def _tuple_type(type_: type) -> Callable[[str], Tuple]: + def _fn(value: str) -> Tuple: + string = value.lstrip("(").rstrip(")") + return tuple(type_(s.strip()) for s in string.split(",")) + return _fn + + +def _epoch_type(value: str) -> Union[float, str, None]: + if not value or value.lower() == 'none': + return None + if value.lower() == 'now': + return 'now' + return float(value) + + +def main(): + parser = argparse.ArgumentParser(description="Generate star pattern database") + + # positional arguments + parser.add_argument("STAR_CATALOG", type=Path, help="Star catalog file to load") + parser.add_argument("SAVE_AS", type=Path, help="File location to save the database") + + # required flags + parser.add_argument("--max-fov", type=float, required=True, + help="Maximum angle (in degrees) between stars in the same pattern.") + + # optional flags + parser.add_argument("--min-fov", type=float, + help="Minimum FOV considered when the catalogue density is trimmed to size.") + parser.add_argument("--lattice-field-oversampling", type=int, default=100, + help="When uniformly distributing pattern generation fields over the " + "celestial sphere, this determines the overlap factor.") + parser.add_argument("--patterns-per-lattice-field", type=int, default=50, + help="The number of patterns generated for each lattice field. " + "Typical values are 20 to 100.") + parser.add_argument("--verification-stars-per-fov", type=int, default=150, + help="Target number of stars used for generating patterns in each FOV region. " + "Also used to limit the number of stars considered for matching in " + "solve images. Typical values are large.") + parser.add_argument("--star-max-magnitude", type=float, + help="Dimmest apparent magnitude of stars retained from star catalog. " + "When not specified causes the limiting magnitude to be computed based on " + "`min_fov` and `verification_stars_per_fov`.") + parser.add_argument("--pattern-max-error", type=float, default=0.001, + help="This value determines the number of bins into which a pattern hash's " + "edge ratios are each quantized: `pattern_bins = 0.25 / pattern_max_error` " + "Default 0.001, corresponding to pattern_bins=250. For a database with " + "limiting magnitude 7, this yields a reasonable pattern hash collision rate.") + parser.add_argument("--multiscale-step", type=float, default=1.5, + help="Determines the largest ratio between subsequent FOVs that is allowed " + "when generating a multiscale database. If the ratio max_fov/min_fov " + "is less than sqrt(multiscale_step) a single scale database is built.") + parser.add_argument("--epoch-proper-motion", type=_epoch_type, default='now', + help="Determines the end year to which stellar proper motions are propagated. " + "If 'now' (default), the current year is used. 
If 'none', star motions " + "are not propagated and this allows catalogue entries without proper " + "motions to be used in the database.") + parser.add_argument("--linear-probe", type=bool, default=False, + help="Determines whether the pattern hash table uses quadratic probing " + "(False) or linear probing (True).") + + args = parser.parse_args() + + t3 = tetra3.Tetra3(load_database=None) + t3.generate_database( + star_catalog=args.STAR_CATALOG, + save_as=args.SAVE_AS, + max_fov=args.max_fov, + min_fov=args.min_fov, + lattice_field_oversampling=args.lattice_field_oversampling, + patterns_per_lattice_field=args.patterns_per_lattice_field, + verification_stars_per_fov=args.verification_stars_per_fov, + star_max_magnitude=args.star_max_magnitude, + pattern_max_error=args.pattern_max_error, + multiscale_step=args.multiscale_step, + epoch_proper_motion=args.epoch_proper_motion, + linear_probe=args.linear_probe, + ) + +if __name__ == "__main__": + main() diff --git a/ogscope/vendor/tetra3/data/README.md b/ogscope/vendor/tetra3/data/README.md new file mode 100644 index 0000000..457bb81 --- /dev/null +++ b/ogscope/vendor/tetra3/data/README.md @@ -0,0 +1,7 @@ +# Tetra3 图案库 / Pattern database + +将 `default_database.npz` 放到本目录,或放到项目 `data/plate_solve/`,或通过 `OGSCOPE_SOLVER_TETRA_DATABASE_PATH` 指向绝对路径。 + +可从本机已下载的 [cedar-solve](https://github.com/smroid/cedar-solve) 源码中复制 `tetra3/data/default_database.npz`,或运行 `tetra3-gen-db`(若已安装 cedar-solve)生成。 + +Place `default_database.npz` here, or under `data/plate_solve/`, or set `OGSCOPE_SOLVER_TETRA_DATABASE_PATH`. Copy from cedar-solve `tetra3/data/` or generate with `tetra3-gen-db`. diff --git a/ogscope/vendor/tetra3/fov_util.py b/ogscope/vendor/tetra3/fov_util.py new file mode 100644 index 0000000..0b152e7 --- /dev/null +++ b/ogscope/vendor/tetra3/fov_util.py @@ -0,0 +1,33 @@ +import math +import numpy as np + +def separation_for_density(fov, stars_per_fov): + """Compute minimum separation, in same units as 'fov', for achieving the desired star + density. + fov: horizontal field of view. + stars_per_fov: desired number of stars in field of view + """ + return .6 * fov / np.sqrt(stars_per_fov) + +def num_fields_for_sky(fov): + """For a given square field of view (in radians), computes how many such + fields of view are needed to cover the entire sky, by area. + """ + return math.ceil(4 * math.pi / (fov * fov)) + +def fibonacci_sphere_lattice(n): + """Yields the 2*n+1 points of a Fibonacci lattice of the sphere. + Returned points are (x, y, z) unit vectors. + See "Measurement of areas on a sphere using Fibonacci and + latitude-longitude lattices" by Alvaro Gonzalez, at + https://arxiv.org/pdf/0912.4540.pdf. + """ + phi = (1 + math.sqrt(5)) / 2 # Golden ratio ~1.618 + golden_angle_incr = 2 * math.pi * (1 - 1 / phi) # radians + for i in range(-n, n+1): + z = i / (n + 0.5) # Ranges over (-1..1). + radius = math.sqrt(1 - z * z) # Distance from axis at z. 
+ theta = golden_angle_incr * i + x = math.cos(theta) * radius + y = math.sin(theta) * radius + yield (x, y, z) diff --git a/ogscope/vendor/tetra3/proto/cedar_detect.proto b/ogscope/vendor/tetra3/proto/cedar_detect.proto new file mode 120000 index 0000000..dfa0615 --- /dev/null +++ b/ogscope/vendor/tetra3/proto/cedar_detect.proto @@ -0,0 +1 @@ +../../../cedar-detect/src/proto/cedar_detect.proto \ No newline at end of file diff --git a/ogscope/vendor/tetra3/tetra3.py b/ogscope/vendor/tetra3/tetra3.py new file mode 100644 index 0000000..ac381a3 --- /dev/null +++ b/ogscope/vendor/tetra3/tetra3.py @@ -0,0 +1,2861 @@ +""" +tetra3: A fast lost-in-space plate solver for star trackers. +============================================================ + +Use it to identify stars in images and get the corresponding direction (i.e. right ascension and +declination) in the sky which the camera points to. The only thing tetra3 needs to know is the +approximate field of view of your camera. + +tetra3 also includes a versatile function to find spot centroids and statistics. +Alternately, you can also use another star detection/centroiding library in conjunction +with tetra3 plate solving. Cedar Detect (https://github.com/smroid/cedar-detect) is a high +performance solution for this; see cedar_detect_client.py for a way to use tetra3 with +Cedar Detect. + +Included in the package: + + - :class:`tetra3.Tetra3`: Class to solve images and load/create databases. + - :meth:`tetra3.get_centroids_from_image`: Extract spot centroids from an image. + - :meth:`tetra3.crop_and_downsample_image`: Crop and/or downsample an image. + +The class :class:`tetra3.Tetra3` has three main methods for solving images: + + - :meth:`Tetra3.solve_from_image`: Solve the camera pointing direction of an image. + - :meth:`Tetra3.solve_from_centroids`: As above, but from a list of star centroids. + - :meth:`Tetra3.generate_database`: Create a new database for your application. + +A default database (named `default_database`) is included in the repo, it is built for a field of +view range of 10 to 30 degrees with stars up to magnitude 8. + +Note: + If you wish to build you own database (typically for a different field-of-view) you must + download a star catalogue. tetra3 supports three options: + + * The 285KB Yale Bright Star Catalog 'BSC5' containing 9,110 stars. This is complete to + to about magnitude seven and is sufficient for >20 deg field-of-view setups. + * The 51MB Hipparcos Catalogue 'hip_main' containing 118,218 stars. This contains about + three stars per square degree and is sufficient down to about >10 deg field-of-view. + * The 355MB Tycho Catalogue 'tyc_main' (also from the Hipparcos satellite mission) + containing 1,058,332 stars, around 25 per square degree. This is complete to + magnitude 10 and is sufficient down to about >3 deg field-of-view. + + The 'BSC5' data is avaiable from (use + byte format file) and 'hip_main' and 'tyc_main' are available from + (save the appropriate .dat file). The + downloaded catalogue must be placed in the tetra3/tetra3 directory. + +Cedar Solve is Free and Open-Source Software based on `Tetra` rewritten by Gustav +Pettersson at ESA, with further improvements by Steven Rosenthal. + +The original software is due to: +J. Brown, K. Stubis, and K. Cahoy, "TETRA: Star Identification with Hash Tables", +Proceedings of the AIAA/USU Conference on Small Satellites, 2017. 
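+
+A minimal solve-from-centroids sketch (illustrative only: `image`, its pixel size and
+the FOV value are placeholders for your own setup; see the solve_from_centroids()
+docstring for the full argument list):
+::
+
+    import tetra3
+    t3 = tetra3.Tetra3()  # loads default_database from tetra3/data/
+    # (y, x) spot positions, e.g. from get_centroids_from_image() or Cedar Detect
+    centroids = tetra3.get_centroids_from_image(image)
+    result = t3.solve_from_centroids(centroids, size=(height, width), fov_estimate=20)
+    print(result)  # solution dict; see the solve_from_* docstrings for the keys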
+ + + +Cedar Solve license: + Copyright 2023 Steven Rosenthal smr@dt3.org + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +tetra3 license: + Copyright 2019 the European Space Agency + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +Original Tetra license notice: + Copyright (c) 2016 brownj4 + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +""" + +# Standard imports: +from pathlib import Path +import csv +import logging +import math +import itertools +from time import perf_counter as precision_timestamp +from datetime import datetime +from numbers import Number +from collections import OrderedDict + +# External imports: +import numpy as np +from numpy.linalg import norm, lstsq +import scipy.ndimage +import scipy.optimize +import scipy.stats +import scipy +from scipy.spatial import KDTree +from scipy.spatial.distance import pdist, cdist + +from PIL import Image, ImageDraw + +# Local imports. 
+from tetra3.breadth_first_combinations import breadth_first_combinations +from tetra3.fov_util import fibonacci_sphere_lattice, num_fields_for_sky, separation_for_density + +# Status codes returned by solve_from_image() and solve_from_centroids() +MATCH_FOUND = 1 +NO_MATCH = 2 +TIMEOUT = 3 +CANCELLED = 4 +TOO_FEW = 5 + +_MAGIC_RAND = np.uint64(2654435761) +_supported_databases = ('bsc5', 'hip_main', 'tyc_main') +_lib_root = Path(__file__).parent + +def _is_prime(n): + if n < 2: + return False + if n == 2: + return True + if n % 2 == 0: + return False + # Only check odd numbers up to sqrt(n) + for i in range(3, int(n ** 0.5) + 1, 2): + if n % i == 0: + return False + return True + +def _next_prime(n): + if n < 2: + return 2 + n = n + 1 + (n % 2) # Next odd number after n + while not _is_prime(n): + n += 2 # Skip even numbers + return n + +def _insert_at_index(pattern, hash_index, table, linear_probe): + """Inserts to table with quadratic or linear probing. Returns table index where + pattern was inserted.""" + max_ind = np.uint64(table.shape[0]) + hash_index = np.uint64(hash_index) + for c in itertools.count(): + c = np.uint64(c) + if linear_probe: + i = (hash_index + c) % max_ind + else: + i = (hash_index + c*c) % max_ind + if all(table[i, :] == 0): + table[i, :] = pattern + return i + +def _get_table_indices_from_hash(hash_index, table, linear_probe): + """Gets from table with quadratic or linear probing, returns list of all + possibly matching indices.""" + max_ind = np.uint64(table.shape[0]) + hash_index = np.uint64(hash_index) + found = [] + for c in itertools.count(): + c = np.uint64(c) + if linear_probe: + i = (hash_index + c) % max_ind + else: + i = (hash_index + c*c) % max_ind + if all(table[i, :] == 0): + return np.array(found) + else: + found.append(i) + +def _compute_pattern_key_hash(pattern_key, bin_factor): + """Computes a 64 bit hash for a given pattern_key (tuple of ordered binned edge + ratios). Can be length p list or n by p array. + """ + pattern_key = np.uint64(pattern_key) + bin_factor = np.uint64(bin_factor) + # If p is the length of the pattern_key (default 5) and B is the number of bins + # (default 50, calculated from max error), this will first give each pattern_key + # a unique index from 0 to B^p-1. + if pattern_key.ndim == 1: + return np.sum(pattern_key*bin_factor**np.arange(len(pattern_key), + dtype=np.uint64), + dtype=np.uint64) + else: + return np.sum(pattern_key*bin_factor**np.arange(pattern_key.shape[1], + dtype=np.uint64)[None, :], + axis=1, dtype=np.uint64) + +def _pattern_key_hash_to_index(pattern_key_hash, max_index, linear_probe): + """Get hash index for a given pattern key hash. + """ + max_index = np.uint64(max_index) + if linear_probe: + return pattern_key_hash % max_index + else: + # For legacy compability. 
+ with np.errstate(over='ignore'): + return (pattern_key_hash*_MAGIC_RAND) % max_index + +def _compute_vectors(centroids, size, fov): + """Get unit vectors from star centroids (pinhole camera).""" + # compute list of (i,j,k) vectors given list of (y,x) star centroids and + # an estimate of the image's field-of-view in the x dimension + # by applying the pinhole camera equations + centroids = np.array(centroids, dtype=np.float32) + (height, width) = size[:2] + scale_factor = np.tan(fov/2)/width*2 + star_vectors = np.ones((len(centroids), 3)) + # Pixel centre of image + img_center = [height/2, width/2] + # Calculate normal vectors + star_vectors[:, 2:0:-1] = (img_center - centroids) * scale_factor + star_vectors = star_vectors / norm(star_vectors, axis=1)[:, None] + return star_vectors + +def _compute_centroids(vectors, size, fov): + """Get (undistorted) centroids from a set of (derotated) unit vectors + vectors: Nx3 of (i,j,k) where i is boresight, j is x (horizontal) + size: (height, width) in pixels. + fov: horizontal field of view in radians. + We return all centroids plus a list of centroids indices that are within + the field of view. + """ + (height, width) = size[:2] + scale_factor = -width/2/np.tan(fov/2) + centroids = scale_factor*vectors[:, 2:0:-1]/vectors[:, [0]] + centroids += [height/2, width/2] + keep = np.flatnonzero(np.logical_and( + np.all(centroids > [0, 0], axis=1), + np.all(centroids < [height, width], axis=1))) + return (centroids, keep) + +def _undistort_centroids(centroids, size, k): + """Apply r_u = r_d(1 - k'*r_d^2)/(1 - k) undistortion, where k'=k*(2/width)^2, + i.e. k is the distortion that applies width/2 away from the centre. + centroids: Nx2 pixel coordinates (y, x), (0.5, 0.5) top left pixel centre. + size: (height, width) in pixels. + k: distortion, negative is barrel, positive is pincushion + """ + centroids = np.array(centroids, dtype=np.float32) + (height, width) = size[:2] + kp = k*(2/width)**2 # k prime + # Centre + centroids -= [height/2, width/2] + r_dist = norm(centroids, axis=1) + # Scale + scale = (1 - kp*r_dist**2)/(1 - k) + centroids *= scale[:, None] + # Decentre + centroids += [height/2, width/2] + return centroids + +def _distort_centroids(centroids, size, k, tol=1e-6, maxiter=30): + """Distort centroids corresponding to r_u = r_d(1 - k'*r_d^2)/(1 - k), + where k'=k*(2/width)^2 i.e. k is the distortion that applies + width/2 away from the centre. + + Iterates with Newton-Raphson until the step is smaller than tol + or maxiter iterations have been exhausted. + """ + centroids = np.array(centroids, dtype=np.float32) + (height, width) = size[:2] + kp = k*(2/width)**2 # k prime + # Centre + centroids -= [height/2, width/2] + r_undist = norm(centroids, axis=1) + # Initial distorted guess, undistorted are the same position + r_dist = r_undist.copy() + for i in range(maxiter): + r_undist_est = r_dist*(1 - kp*r_dist**2)/(1 - k) + dru_drd = (1 - 2*kp*r_dist)/(1 - k) + error = r_undist - r_undist_est + r_dist += error/dru_drd + if np.all(np.abs(error) < tol): + break + centroids *= (r_dist/r_undist)[:, None] + centroids += [height/2, width/2] + return centroids + +def _find_rotation_matrix(image_vectors, catalog_vectors): + """Calculate the least squares best rotation matrix between the two sets of vectors. + image_vectors and catalog_vectors both Nx3. Must be ordered as matching pairs. 
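+
+    Note: this is the standard SVD solution to the orthogonal Procrustes (Wahba)
+    problem. With H = image_vectors.T @ catalog_vectors and numpy's svd returning
+    (U, S, V) where V is already transposed, the least-squares rotation is U @ V.
+    No reflection (det = -1) correction is applied; for genuinely matching star
+    pairs the result is normally a proper rotation.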
+ """ + # find the covariance matrix H between the image and catalog vectors + H = np.dot(image_vectors.T, catalog_vectors) + # use singular value decomposition to find the rotation matrix + (U, S, V) = np.linalg.svd(H) + return np.dot(U, V) + +def _find_centroid_matches(image_centroids, catalog_centroids, r): + """Find matching pairs, unique and within radius r. + image_centroids: Nx2 (y, x) in pixels + catalog_centroids: Mx2 (y, x) in pixels + r: radius in pixels + + returns Kx2 list of matches, first column is index in image_centroids, + second column is index in catalog_centroids + """ + dists = cdist(image_centroids, catalog_centroids) + matches = np.argwhere(dists < r) + # Make sure we only have unique 1-1 matches + matches = matches[np.unique(matches[:, 1], return_index=True)[1], :] + matches = matches[np.unique(matches[:, 0], return_index=True)[1], :] + return matches + +def _angle_from_distance(dist): + """Given a euclidean distance between two points on the unit sphere, + return the center angle (in radians) between the two points. + """ + return 2.0 * np.arcsin(0.5 * dist) + +def _distance_from_angle(angle): + """Return the euclidean distance between two points on the unit sphere with the + given center angle (in radians). + """ + return 2.0 * np.sin(angle / 2.0) + + +class Tetra3(): + """Solve star patterns and manage databases. + + To find the direction in the sky an image is showing, this class calculates the + geometric keys of star patterns seen in the image and looks for matching keys in a + pattern database loaded into memory. Subsequently, all stars that should be visible in + the image (based on the database pattern's location) are looked for and the match is + confirmed or rejected based on the probability that the found number of matches + happens by chance. + + Each pattern is made up of four stars, and the pattern key is created by calculating + the distances between every pair of stars in the pattern and normalising by the + longest to create a set of five numbers between zero and one. This information, and + the desired tolerance, is used to find the indices in the database where the match may + reside by a table index hashing function. See the description of + :meth:`generate_database` for more detail. + + A database needs to be generated with patterns which are of appropriate scale for the + horizontal field of view (FOV) of your camera. Therefore, generate a database using + :meth:`generate_database` with a `max_fov` which is the FOV of your camera (or + slightly larger). A database with `max_fov=30` (degrees) is included as + `default_database.npz`. + + Star locations (centroids) are found using :meth:`tetra3.get_centroids_from_image`, + use one of your images to find settings which work well for your images. Then pass + those settings as keyword arguments to :meth:`solve_from_image`. Alternately, you can + use Cedar Detect for detecting and centroiding stars in your images. 
+ + Example 1: Load database and solve image + :: + + import tetra3 + # Create instance, automatically loads the default database + t3 = tetra3.Tetra3() + # Solve for image (PIL.Image), with some optional arguments + result = t3.solve_from_image(image, fov_estimate=11, fov_max_error=.5, max_area=300) + + Example 2: Generate and save database + :: + + import tetra3 + # Create instance without loading any database + t3 = tetra3.Tetra3(load_database=None) + # Generate and save database + t3.generate_database(max_fov=20, save_as='my_database_name') + + Args: + load_database (str or pathlib.Path, optional): Database to load. Will call + :meth:`load_database` with the provided argument after creating instance. Defaults to + 'default_database'. Can set to None to create Tetra3 object without loaded database. + debug_folder (pathlib.Path, optional): The folder for debug logging. If None (the default) + debug logging will be disabled unless handlers have been added to the `tetra3.Tetra3` + logger before creating the insance. + + """ + def __init__(self, load_database='default_database', debug_folder=None): + # Logger setup + self._debug_folder = None + self._logger = logging.getLogger('tetra3.Tetra3') + if not self._logger.hasHandlers(): + # Add new handlers to the logger if there are none + self._logger.setLevel(logging.DEBUG) + # Console handler at INFO level + ch = logging.StreamHandler() + ch.setLevel(logging.INFO) + # Format and add + formatter = logging.Formatter('%(asctime)s:%(name)s-%(levelname)s: %(message)s') + ch.setFormatter(formatter) + self._logger.addHandler(ch) + if debug_folder is not None: + self.debug_folder = debug_folder + # File handler at DEBUG level + fh = logging.FileHandler(self.debug_folder / 'tetra3.txt') + fh.setLevel(logging.DEBUG) + fh.setFormatter(formatter) + self._logger.addHandler(fh) + + self._logger.debug('Tetra3 Constructor called with load_database=' + str(load_database)) + self._star_table = None + self._star_kd_tree = None + self._star_catalog_IDs = None + self._pattern_catalog = None + self._num_patterns = None + self._pattern_largest_edge = None + self._pattern_key_hashes = None + self._verification_catalog = None + self._cancelled = False + + self._db_props = {'pattern_mode': None, 'hash_table_type': None, + 'pattern_size': None, 'pattern_bins': None, 'pattern_max_error': None, + 'max_fov': None, 'min_fov': None, 'star_catalog': None, + 'epoch_equinox': None, 'epoch_proper_motion': None, + 'lattice_field_oversampling': None, 'patterns_per_lattice_field': None, + 'verification_stars_per_fov': None, 'star_max_magnitude': None, + 'range_ra': None, 'range_dec': None, 'presort_patterns': None, + 'num_patterns': None} + + if load_database is not None: + self._logger.debug('Trying to load database') + self.load_database(load_database) + + @property + def debug_folder(self): + """pathlib.Path: Get or set the path for debug logging. Will create folder if not existing. + """ + return self._debug_folder + + @debug_folder.setter + def debug_folder(self, path): + # Do not do logging in here! This will be called before the logger is set up + assert isinstance(path, Path), 'Must be pathlib.Path object' + if path.is_file(): + path = path.parent + if not path.is_dir(): + path.mkdir(parents=True) + self._debug_folder = path + + @property + def has_database(self): + """bool: True if a database is loaded.""" + return not (self._star_table is None or self._pattern_catalog is None) + + @property + def star_table(self): + """numpy.ndarray: Table of stars in the database. 
+ + The table is an array with six columns: + - Right ascension (radians) + - Declination (radians) + - x = cos(ra) * cos(dec) + - y = sin(ra) * cos(dec) + - z = sin(dec) + - Apparent magnitude + """ + return self._star_table + + @property + def star_kd_tree(self): + """KDTree: KD tree of stars in the database. + """ + return self._star_kd_tree + + @property + def pattern_catalog(self): + """numpy.ndarray: Catalog of patterns in the database.""" + return self._pattern_catalog + + @property + def num_patterns(self): + """numpy.uint32: Number of patterns in the database.""" + return self._num_patterns + + @property + def pattern_largest_edge(self): + """numpy.ndarray: Catalog of largest edges for each pattern in milliradian.""" + return self._pattern_largest_edge + + @property + def pattern_key_hashes(self): + """numpy.ndarray: Catalog of pattern key hashes for each pattern in the + database.""" + return self._pattern_key_hashes + + @property + def star_catalog_IDs(self): + """numpy.ndarray: Table of catalogue IDs for each entry in the star table. + + The table takes different format depending on the source catalogue used + to build the database. See the `star_catalog` key of + :meth:`database_properties` to find the source catalogue. + - bsc5: A numpy array of size (N,) with datatype uint16. Stores the 'BSC' number. + - hip_main: A numpy array of size (N,) with datatype uint32. Stores the 'HIP' number. + - tyc_main: A numpy array of size (N, 3) with datatype uint16. Stores the + (TYC1, TYC2, TYC3) numbers. + + Is None if no database is loaded or an older database without IDs stored. + """ + return self._star_catalog_IDs + + @property + def database_properties(self): + """dict: Dictionary of database properties. + + Keys: + - 'pattern_mode': Method used to identify star patterns. Is always 'edge_ratio'. + - 'hash_table_type': What algorithm is used for the pattern hash table. The only + values (currently) are 'quadratic_probe' and 'linear_probe'. + - 'pattern_size': Number of stars in each pattern. + - 'pattern_bins': Number of bins per dimension in pattern catalog. + - 'pattern_max_error': Maximum difference allowed in pattern for a match. + - 'max_fov': Maximum camera horizontal field of view (in degrees) the database is + built for. This will also be the angular extent of the largest pattern. + - 'min_fov': Minimum camera horizontal field of view (in degrees) the database is + built for. This drives the density of stars in the database, patterns may be + smaller than this. + - 'lattice_field_oversampling': When uniformly distributing pattern generation fields over + the celestial sphere, this determines the overlap factor. + Also stored as 'pattern_stars_per_fov' for compatibility with earlier versions. + - 'patterns_per_lattice_field': Number of patterns generated for each lattice field. + Also stored as 'pattern_stars_per_anchor_star' for compatibility with earlier versions. + - 'verification_stars_per_fov': Number of stars in solve-time FOV to retain. + - 'star_max_magnitude': Dimmest apparent magnitude of stars in database. + - 'star_catalog': Name of the star catalog (e.g. bcs5, hip_main, tyc_main) the database was + built from. Returns 'unknown' for old databases where this data was not saved. + - 'epoch_equinox': Epoch of the 'star_catalog' celestial coordinate system. Usually 2000, + but could be 1950 for old Bright Star Catalog versions. + - 'epoch_proper_motion': year to which stellar proper motions have been propagated. 
+ - 'presort_patterns': Indicates if the pattern indices are sorted by distance to the + centroid. + - 'range_ra': Always None, no longer used. The whole sky is included in the database. + - 'range_dec': Always None, no longer used. The whole sky is included in the database. + - 'num_patterns': The number of patterns in the database. If None, this is one + half of the pattern table size. + """ + return self._db_props + + def load_database(self, path='default_database'): + """Load database from file. + + Args: + path (str or pathlib.Path): The file to load. If given a str, the file will be looked + for in the tetra3/data directory. If given a pathlib.Path, this path will be used + unmodified. The suffix .npz will be added. + """ + self._logger.debug('Got load database with: ' + str(path)) + if isinstance(path, str): + self._logger.debug('String given, append to tetra3 directory') + path = (Path(__file__).parent / 'data' / path).with_suffix('.npz') + else: + self._logger.debug('Not a string, use as path directly') + path = Path(path).with_suffix('.npz') + + self._logger.info('Loading database from: ' + str(path)) + with np.load(path) as data: + self._logger.debug('Loaded database, unpack files') + self._pattern_catalog = data['pattern_catalog'] + + self._star_table = data['star_table'] + # Insert all stars in a KD-tree for fast neighbour lookup + all_star_vectors = self._star_table[:, 2:5] + self._star_kd_tree = KDTree(all_star_vectors) + + props_packed = data['props_packed'] + try: + self._pattern_largest_edge = data['pattern_largest_edge'] + except KeyError: + self._logger.debug('Database does not have largest edge stored, set to None.') + self._pattern_largest_edge = None + try: + self._pattern_key_hashes = data['pattern_key_hashes'] + except KeyError: + self._logger.debug('Database does not have pattern key hashes stored, set to None.') + self._pattern_key_hashes = None + try: + self._star_catalog_IDs = data['star_catalog_IDs'] + except KeyError: + self._logger.debug('Database does not have catalogue IDs stored, set to None.') + self._star_catalog_IDs = None + + self._logger.debug('Unpacking properties') + for key in self._db_props.keys(): + try: + self._db_props[key] = props_packed[key][()] + self._logger.debug('Unpacked ' + str(key)+' to: ' + str(self._db_props[key])) + except ValueError: + if key == 'verification_stars_per_fov': + self._db_props[key] = props_packed['catalog_stars_per_fov'][()] + self._logger.debug('Unpacked catalog_stars_per_fov to: ' \ + + str(self._db_props[key])) + elif key == 'star_max_magnitude': + self._db_props[key] = props_packed['star_min_magnitude'][()] + self._logger.debug('Unpacked star_min_magnitude to: ' \ + + str(self._db_props[key])) + elif key == 'presort_patterns': + self._db_props[key] = False + self._logger.debug('No presort_patterns key, set to False') + elif key == 'star_catalog': + self._db_props[key] = 'unknown' + self._logger.debug('No star_catalog key, set to unknown') + elif key == 'num_patterns': + self._db_props[key] = self.pattern_catalog.shape[0] // 2 + self._logger.debug('No num_patterns key, set to half of pattern_catalog size') + else: + self._db_props[key] = None + self._logger.warning('Missing key in database (likely version difference): %s' + % str(key)) + if self._db_props['min_fov'] is None: + self._logger.debug('No min_fov key, copy from max_fov') + self._db_props['min_fov'] = self._db_props['max_fov'] + self._num_patterns = self._db_props['num_patterns'] + self._logger.debug('Database properties %s' % self._db_props) + + + def 
save_database(self, path): + """Save database to file. + + Args: + path (str or pathlib.Path): The file to save to. If given a str, the file will be saved + in the tetra3/data directory. If given a pathlib.Path, this path will be used + unmodified. The suffix .npz will be added. + """ + assert self.has_database, 'No database' + self._logger.debug('Got save database with: ' + str(path)) + if isinstance(path, str): + self._logger.debug('String given, append to tetra3 directory') + path = (Path(__file__).parent / 'data' / path).with_suffix('.npz') + else: + self._logger.debug('Not a string, use as path directly') + path = Path(path).with_suffix('.npz') + + self._logger.info('Saving database to: ' + str(path)) + + # Pack properties as numpy structured array + props_packed = np.array((self._db_props['pattern_mode'], + self._db_props['hash_table_type'], + self._db_props['pattern_size'], + self._db_props['pattern_bins'], + self._db_props['pattern_max_error'], + self._db_props['max_fov'], + self._db_props['min_fov'], + self._db_props['star_catalog'], + self._db_props['epoch_equinox'], + self._db_props['epoch_proper_motion'], + self._db_props['lattice_field_oversampling'], + self._db_props['anchor_stars_per_fov'], # legacy + self._db_props['pattern_stars_per_fov'], # legacy + self._db_props['patterns_per_lattice_field'], + self._db_props['patterns_per_anchor_star'], # legacy + self._db_props['verification_stars_per_fov'], + self._db_props['star_max_magnitude'], + self._db_props['simplify_pattern'], # legacy + self._db_props['range_ra'], + self._db_props['range_dec'], + self._db_props['presort_patterns'], + self._db_props['num_patterns']), + dtype=[('pattern_mode', 'U64'), + ('hash_table_type', 'U64'), + ('pattern_size', np.uint16), + ('pattern_bins', np.uint16), + ('pattern_max_error', np.float32), + ('max_fov', np.float32), + ('min_fov', np.float32), + ('star_catalog', 'U64'), + ('epoch_equinox', np.uint16), + ('epoch_proper_motion', np.float32), + ('lattice_field_oversampling', np.uint16), + ('anchor_stars_per_fov', np.uint16), + ('pattern_stars_per_fov', np.uint16), + ('patterns_per_lattice_field', np.uint16), + ('patterns_per_anchor_star', np.uint16), + ('verification_stars_per_fov', np.uint16), + ('star_max_magnitude', np.float32), + ('simplify_pattern', bool), + ('range_ra', np.float32, (2,)), + ('range_dec', np.float32, (2,)), + ('presort_patterns', bool), + ('num_patterns', np.uint32)]) + + self._logger.debug('Packed properties into: ' + str(props_packed)) + self._logger.debug('Saving as compressed numpy archive') + + to_save = {'star_table': self.star_table, + 'pattern_catalog': self.pattern_catalog, + 'props_packed': props_packed} + if self.pattern_largest_edge is not None: + to_save['pattern_largest_edge'] = self.pattern_largest_edge + if self.pattern_key_hashes is not None: + to_save['pattern_key_hashes'] = self.pattern_key_hashes + if self.star_catalog_IDs is not None: + to_save['star_catalog_IDs'] = self.star_catalog_IDs + + np.savez_compressed(path, **to_save) + + @staticmethod + def _load_catalog(star_catalog, catalog_file_full_pathname, epoch_proper_motion, logger): + """Loads the star catalog and returns at tuple of: + star_table: an array of [ra, dec, 0, 0, 0, mag] + star_catID: array of catalog IDs for the entries in star_table + epoch_equinox: the epoch of the star catalog's celestial coordinate system. 
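+
+        Note: ra and dec in star_table are in radians (degree-based catalogs are
+        converted with np.deg2rad); the three zero columns are placeholders for the
+        x, y, z unit-vector components described in the star_table property.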
+ """ + + # Calculate number of star catalog entries: + if star_catalog == 'bsc5': + # See http://tdc-www.harvard.edu/catalogs/catalogsb.html + bsc5_header_type = [('STAR0', np.int32), ('STAR1', np.int32), + ('STARN', np.int32), ('STNUM', np.int32), + ('MPROP', np.int32), ('NMAG', np.int32), + ('NBENT', np.int32)] + reader = np.fromfile(catalog_file_full_pathname, dtype=bsc5_header_type, count=1) + entry = reader[0] + num_entries = entry[2] + header_length = reader.itemsize + if num_entries > 0: + epoch_equinox = 1950 + pm_origin = 1950 # this is an assumption, not specified in bsc5 docs + else: + num_entries = -num_entries + epoch_equinox = 2000 + pm_origin = 2000 # this is an assumption, not specified in bsc5 docs + # Check that the catalogue version has the data we need + stnum = entry[3] + if stnum != 1: + logger.warning('Catalogue %s has unexpected "stnum" header value: %s' % + (star_catalog, stnum)) + mprop = entry[4] + if mprop != 1: + logger.warning('Catalogue %s has unexpected "mprop" header value: %s' % + (star_catalog, mprop)) + nmag = entry[5] + if nmag != 1: + logger.warning('Catalogue %s has unexpected "nmag" header value: %s' % + (star_catalog, nmag)) + nbent = entry[6] + if nbent != 32: + logger.warning('Catalogue %s has unexpected "nbent" header value: %s' % + (star_catalog, nbent)) + elif star_catalog in ('hip_main', 'tyc_main'): + num_entries = sum(1 for _ in open(catalog_file_full_pathname)) + epoch_equinox = 2000 + pm_origin = 1991.25 + + logger.info('Loading catalogue %s with %s star entries.' % + (star_catalog, num_entries)) + + if epoch_proper_motion is None: + # If pm propagation was disabled, set end date to origin + epoch_proper_motion = pm_origin + logger.info('Using catalog RA/Dec %s epoch; not propagating proper motions from %s.' % + (epoch_equinox, pm_origin)) + else: + logger.info('Using catalog RA/Dec %s epoch; propagating proper motions from %s to %s.' % + (epoch_equinox, pm_origin, epoch_proper_motion)) + + # Preallocate star table: elements are [ra, dec, x, y, z, mag]. + star_table = np.zeros((num_entries, 6), dtype=np.float32) + # Preallocate ID table + if star_catalog == 'bsc5': + star_catID = np.zeros(num_entries, dtype=np.uint16) + elif star_catalog == 'hip_main': + star_catID = np.zeros(num_entries, dtype=np.uint32) + else: # is tyc_main + star_catID = np.zeros((num_entries, 3), dtype=np.uint16) + + # Read magnitude, RA, and Dec from star catalog: + if star_catalog == 'bsc5': + bsc5_data_type = [('ID', np.float32), ('RA', np.float64), + ('Dec', np.float64), ('type', np.int16), + ('mag', np.int16), ('RA_pm', np.float32), ('Dec_PM', np.float32)] + with open(catalog_file_full_pathname, 'rb') as star_catalog_file: + star_catalog_file.seek(header_length) # skip header + reader = np.fromfile(star_catalog_file, dtype=bsc5_data_type, count=num_entries) + for (i, entry) in enumerate(reader): + mag = entry[4]/100 + # RA/Dec in radians at epoch proper motion start. + alpha = float(entry[1]) + delta = float(entry[2]) + cos_delta = np.cos(delta) + + # Pick up proper motion terms. See notes for hip_main and tyc_main below. + # Radians per year. + mu_alpha_cos_delta = float(entry[5]) + mu_delta = float(entry[6]) + + # See notes below. 
+ if cos_delta > 0.05: + mu_alpha = mu_alpha_cos_delta / cos_delta + else: + mu_alpha = 0 + mu_delta = 0 + + ra = alpha + mu_alpha * (epoch_proper_motion - pm_origin) + dec = delta + mu_delta * (epoch_proper_motion - pm_origin) + star_table[i,:] = ([ra, dec, 0, 0, 0, mag]) + star_catID[i] = np.uint16(entry[0]) + elif star_catalog in ('hip_main', 'tyc_main'): + # The Hipparcos and Tycho catalogs uses International Celestial + # Reference System (ICRS) which is essentially J2000. See + # https://cdsarc.u-strasbg.fr/ftp/cats/I/239/version_cd/docs/vol1/sect1_02.pdf + # section 1.2.1 for details. + with open(catalog_file_full_pathname, 'r') as star_catalog_file: + reader = csv.reader(star_catalog_file, delimiter='|') + incomplete_entries = 0 + for (i, entry) in enumerate(reader): + # Skip this entry if mag, ra, or dec are empty. + if entry[5].isspace() or entry[8].isspace() or entry[9].isspace(): + incomplete_entries += 1 + continue + # If propagating, skip if proper motions are empty. + if epoch_proper_motion != pm_origin \ + and (entry[12].isspace() or entry[13].isspace()): + incomplete_entries += 1 + continue + mag = float(entry[5]) + # RA/Dec in degrees at 1991.25 proper motion start. + alpha = float(entry[8]) + delta = float(entry[9]) + cos_delta = np.cos(np.deg2rad(delta)) + + mu_alpha = 0 + mu_delta = 0 + if epoch_proper_motion != pm_origin: + # Pick up proper motion terms. Note that the pmRA field is + # "proper motion in right ascension"; see + # https://en.wikipedia.org/wiki/Proper_motion; see also section + # 1.2.5 in the cdsarc.u-strasbg document cited above. + + # The 1000/60/60 term converts milliarcseconds per year to + # degrees per year. + mu_alpha_cos_delta = float(entry[12])/1000/60/60 + mu_delta = float(entry[13])/1000/60/60 + + # Divide the pmRA field by cos_delta to recover the RA proper + # motion rate. Note however that near the poles (delta near plus + # or minus 90 degrees) the cos_delta term goes to zero so dividing + # by cos_delta is problematic there. + # Section 1.2.9 of the cdsarc.u-strasbg document cited above + # outlines a change of coordinate system that can overcome + # this problem; we simply punt on proper motion near the poles. + if cos_delta > 0.05: + mu_alpha = mu_alpha_cos_delta / cos_delta + else: + # abs(dec) > ~87 degrees. Ignore proper motion. + mu_alpha = 0 + mu_delta = 0 + + ra = np.deg2rad(alpha + mu_alpha * (epoch_proper_motion - pm_origin)) + dec = np.deg2rad(delta + mu_delta * (epoch_proper_motion - pm_origin)) + star_table[i,:] = ([ra, dec, 0, 0, 0, mag]) + # Find ID, depends on the database + if star_catalog == 'hip_main': + star_catID[i] = np.uint32(entry[1]) + else: # is tyc_main + star_catID[i, :] = [np.uint16(x) for x in entry[1].split()] + + if incomplete_entries: + logger.info('Skipped %i incomplete entries.' % incomplete_entries) + + # Remove entries in which RA and Dec are both zero + # (i.e. 
keep entries in which either RA or Dec is non-zero) + kept = np.logical_or(star_table[:, 0] != 0, star_table[:, 1] != 0) + star_table = star_table[kept, :] + brightness_ii = np.argsort(star_table[:, 5]) + star_table = star_table[brightness_ii, :] # Sort by brightness + num_entries = star_table.shape[0] + # Trim and order catalogue ID array to match + if star_catalog in ('bsc5', 'hip_main'): + star_catID = star_catID[kept][brightness_ii] + else: + star_catID = star_catID[kept, :][brightness_ii, :] + + logger.info('Loaded %d stars' % num_entries) + return (star_table, star_catID, epoch_equinox) + + def generate_database(self, max_fov, min_fov=None, save_as=None, + star_catalog='hip_main', + lattice_field_oversampling=100, patterns_per_lattice_field=50, + verification_stars_per_fov=150, star_max_magnitude=None, + pattern_max_error=.001, + multiscale_step=1.5, epoch_proper_motion='now', + pattern_stars_per_fov=None, linear_probe=False): + """Create a database and optionally save it to file. + + Takes a few minutes for a small (large FOV) database, can take many hours for a large + (small FOV) database. The primary knowledge necessary is the FOV you want the database + to work for and the highest magnitude of stars you want to include. + + For a single application, set max_fov equal to your known FOV. Alternatively, set + max_fov and min_fov to the range of FOVs you want the database to be built for. For + large difference in max_fov and min_fov, a multiscale database will be built where + patterns of several different sizes on the sky will be included. + + Note: + If you wish to build you own database you must download a star catalogue. tetra3 + supports three options, where the 'hip_main' is the default and recommended + database to use: + * The 285KB Yale Bright Star Catalog 'BSC5' containing 9,110 stars. This is complete to + to about magnitude seven and is sufficient for >10 deg field-of-view setups. + * The 51MB Hipparcos Catalogue 'hip_main' containing 118,218 stars. This contains about + three stars per square degree and is sufficient down to about >3 deg field-of-view. + * The 355MB Tycho Catalogue 'tyc_main' (also from the Hipparcos satellite mission) + containing 1,058,332 stars. This is complete to magnitude 10 and is sufficient + for all tetra3 databases. + The 'BSC5' data is avaiable from (use + byte format file) and 'hip_main' and 'tyc_main' are available from + (save the appropriate .dat file). The + downloaded catalogue must be placed in the tetra3/tetra3 directory. + + Example, the default database was generated with: + :: + + # Create instance + t3 = tetra3.Tetra3() + # Generate and save database + t3.generate_database(max_fov=30, min_fov=10, save_as='default_database') + + If you know your FOV, set max_fov to this value and leave min_fov as None. The example above + takes less than 7 minutes to build on RPi4. + + Note on celestial coordinates: The RA/Dec values incorporated into the database + are expressed in the same celestial coordinate system as the input catalog. For + hip_main and tyc_main this is J2000; for bsc5 this is also J2000 (but could be + B1950 for older Bright Star Catalogs). The solve_from_image() function returns its + solution's RA/Dec values along with the equinox epoch of the database's catalog. + + Notes on proper motion: star catalogs include stellar proper motion data. This + means they give each star's position as of a specified year (1991.25 for hip_main + and tyc_main; 2000(?) for bsc5). 
In addition, for each star, the annual rate of
+        motion in RA/Dec is also given. This allows generate_database() to output a
+        database with stellar positions propagated to the year in which the database was
+        generated (by default; see below). Some stars don't have proper motions in the
+        catalogue and will therefore be excluded from the database, however, you can set
+        epoch_proper_motion=None to disable this propagation and all stars will be
+        included. The field 'epoch_proper_motion' of the database properties identifies
+        the epoch for which the star positions are valid.
+
+        Theoretically, when passing an image to solve_from_image(), the database's
+        epoch_proper_motion should be the same as the time at which the image was taken.
+        In practice, this is generally unimportant because most stars' proper motion is
+        very small. One exception: for very small fields of view (high magnification),
+        even small proper motions can be significant. Another exception: when solving
+        historical images. In both cases, you should arrange to use a database built with
+        an epoch_proper_motion similar to the image's vintage.
+
+        About patterns, pattern keys, and collisions:
+
+        Tetra3 refers to a grouping of four stars as a "pattern", and assigns each pattern
+        a pattern key as follows:
+
+        1. Calculate the six edge distances between each pair of stars in the pattern.
+        2. Normalise by the longest edge to create a set of five numbers each between zero and
+           one.
+        3. Order the five edge ratios.
+        4. Quantize each edge ratio into a designated number of bins.
+        5. Concatenate the five ordered and quantized edge ratios to form the key for the
+           pattern.
+
+        When solving an image, tetra3 forms patterns from 4-groups of stars in the image,
+        computes each pattern's key in the same manner, and uses these pattern keys to look
+        up the corresponding database pattern (or patterns, see next). The location of
+        stars in the database pattern and other nearby catalog stars are used to validate
+        the match in the image.
+
+        Note that it is possible for multiple distinct patterns to share the same key;
+        this happens more frequently as the number of quantization bins in step 4 is
+        reduced. When multiple patterns share the same key we call this a "pattern key
+        collision". When solving an image, pattern key collisions increase the number of
+        database patterns to be validated as a match against the image's star patterns.
+
+        In theory, a Python dict could be used to map from pattern key value to the list
+        of patterns with that key value. However, catalog databases can easily contain
+        millions of patterns, so in practice such a pattern dict would occupy an
+        uncomfortably large amount of memory.
+
+        Tetra3 instead uses an efficient array representation of its patterns, with each
+        pattern key value being hashed (*) to form an index into the pattern array.
+        Mapping the large space of possible pattern key values to the modest range of
+        pattern array indices induces further collisions. Because the pattern array is
+        allocated to larger than the number of patterns, the additional hash table
+        collisions induced are modest.
+
+        * We have two hashing concepts in play. The first is "geometric hashing" from the
+        field of object recognition and pattern matching
+        (https://en.wikipedia.org/wiki/Geometric_hashing), where a 4-star pattern is
+        distilled to our pattern key, a 5-tuple of quantized edge ratios.
The second is a + "hash table" (https://en.wikipedia.org/wiki/Hash_table) where the pattern key is + hashed to index into a compact table of all of the star patterns. + + Args: + max_fov (float): Maximum angle (in degrees) between stars in the same pattern. + min_fov (float, optional): Minimum FOV considered when the catalogue density is + trimmed to size. If None (the default), min_fov will be set to max_fov, i.e. + a catalogue for a single application is generated (this is most efficient size + and speed wise). + save_as (str or pathlib.Path, optional): Save catalogue here when finished. Calls + :meth:`save_database`. + star_catalog (string, optional): Abbreviated name of star catalog, one of 'bsc5', + 'hip_main', or 'tyc_main'. Default 'hip_main'. + lattice_field_oversampling (int, optional): When uniformly distributing pattern + generation fields over the celestial sphere, this determines the overlap factor. + Default is 100. + patterns_per_lattice_field (int, optional): The number of patterns generated for each + lattice field. Typical values are 20 to 100; default is 50. + verification_stars_per_fov (int, optional): Target number of stars used for generating + patterns in each FOV region. Also used to limit the number of stars considered for + matching in solve images. Typical values are large; default is 150. + star_max_magnitude (float, optional): Dimmest apparent magnitude of stars retained + from star catalog. None (default) causes the limiting magnitude to be computed + based on `min_fov` and `verification_stars_per_fov`. + pattern_max_error (float, optional): This value determines the number of bins into which + a pattern key's edge ratios are each quantized: + pattern_bins = 0.25 / pattern_max_error + Default .001, corresponding to pattern_bins=250. For a database with limiting + magnitude 7, this yields a reasonable pattern key collision rate. + multiscale_step (float, optional): Determines the largest ratio between subsequent FOVs + that is allowed when generating a multiscale database. Defaults to 1.5. If the ratio + max_fov/min_fov is less than sqrt(multiscale_step) a single scale database is built. + epoch_proper_motion (string or float, optional): Determines the end year to which + stellar proper motions are propagated. If 'now' (default), the current year is used. + If 'none' or None, star motions are not propagated and this allows catalogue entries + without proper motions to be used in the database. + pattern_stars_per_fov (int, optional): Deprecated. If given, is used instead of + `lattice_field_oversampling`, which has similar values. + linear_probe (bool, optional): If False (default), uses quadratic probing in the + hash table. This is appropriate for deployments where you expect the pattern + database to fit entirely in RAM. Use linear_probe=True when you expect the + pattern database to be too large to fit in RAM. 
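For the single-application case mentioned earlier (one known FOV, `min_fov` left as None), the call reduces to something like the sketch below; the FOV value and output name are placeholders, not project defaults.

```python
import tetra3

t3 = tetra3.Tetra3()
# min_fov defaults to max_fov, giving a single-scale database, which the
# parameter notes above describe as the most efficient in size and speed.
t3.generate_database(max_fov=12.0,             # known horizontal FOV in degrees
                     star_catalog='hip_main',  # default catalogue
                     save_as='my_fov12_database')
```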
+ + """ + self._logger.debug('Got generate pattern catalogue with input: ' + + str((max_fov, min_fov, save_as, star_catalog, + lattice_field_oversampling, + patterns_per_lattice_field, verification_stars_per_fov, + star_max_magnitude, pattern_max_error, + multiscale_step, epoch_proper_motion, linear_probe))) + if pattern_stars_per_fov is not None and pattern_stars_per_fov != lattice_field_oversampling: + self._logger.warning( + 'pattern_stars_per_fov value %s is overriding lattice_field_oversampling value %s' % + (pattern_stars_per_fov, lattice_field_oversampling)) + lattice_field_oversampling = pattern_stars_per_fov + + # If True, measures and logs collisions (pattern key, hash table). + EVALUATE_COLLISIONS = False + + star_catalog, catalog_file_full_pathname = self._build_catalog_path(star_catalog) + + max_fov = np.deg2rad(float(max_fov)) + if min_fov is None: + min_fov = max_fov + else: + min_fov = np.deg2rad(float(min_fov)) + + # Making lattice_field_oversampling larger yields more patterns, with diminishing + # returns. + # value fraction of patterns found compared to lattice_field_oversampling=100000 + # 100 0.61 + # 1000 0.86 + # 10000 0.96 + lattice_field_oversampling = int(lattice_field_oversampling) + + patterns_per_lattice_field = int(patterns_per_lattice_field) + verification_stars_per_fov = int(verification_stars_per_fov) + linear_probe = bool(linear_probe) + if star_max_magnitude is not None: + star_max_magnitude = float(star_max_magnitude) + PATTERN_SIZE = 4 + pattern_bins = round(1/4/pattern_max_error) + if epoch_proper_motion is None or str(epoch_proper_motion).lower() == 'none': + epoch_proper_motion = None + self._logger.debug('Proper motions will not be considered') + elif isinstance(epoch_proper_motion, Number): + self._logger.debug('Use proper motion epoch as given') + elif str(epoch_proper_motion).lower() == 'now': + epoch_proper_motion = datetime.utcnow().year + self._logger.debug('Proper motion epoch set to now: ' + str(epoch_proper_motion)) + else: + raise ValueError('epoch_proper_motion value %s is forbidden' % epoch_proper_motion) + + star_table, star_catID, epoch_equinox = Tetra3._load_catalog( + star_catalog, + catalog_file_full_pathname, + epoch_proper_motion, + self._logger, + ) + + if star_max_magnitude is None: + # Compute the catalog magnitude cutoff based on the required star density. + + # First, characterize the catalog star brightness distribution. + mag_histo_values, mag_histo_edges = np.histogram(star_table[:, 5], bins=100) + index_of_peak = np.argmax(mag_histo_values) + catalog_mag_limit = mag_histo_edges[index_of_peak] + catalog_mag_max = mag_histo_edges[-1] + self._logger.debug('Catalog star counts peak: mag=%.1f' % catalog_mag_limit) + + # How many FOVs are in the entire sky? + num_fovs = num_fields_for_sky(min_fov) + + # The total number of stars needed. + total_stars_needed = num_fovs * verification_stars_per_fov + + # Empirically determined fudge factor. With this, the star_max_magnitude is + # about 0.5 magnitude fainter than the dimmest pattern star. 
+ total_stars_needed *= 0.7 + + cumulative = np.cumsum(mag_histo_values) + mag_index = np.where(cumulative > total_stars_needed)[0] + if mag_index.size == 0: + star_max_magnitude = catalog_mag_max + else: + star_max_magnitude = mag_histo_edges[mag_index[0]] + if star_max_magnitude > catalog_mag_limit: + self._logger.warning('Catalog magnitude limit %.1f is too low to provide %d stars' % + (catalog_mag_limit, total_stars_needed)) + + kept = star_table[:, 5] <= star_max_magnitude + star_table = star_table[kept, :] + if star_catalog in ('bsc5', 'hip_main'): + star_catID = star_catID[kept] + else: + star_catID = star_catID[kept, :] + + num_entries = star_table.shape[0] + self._logger.info('Kept %d stars brighter than magnitude %.1f.' % + (num_entries, star_max_magnitude)) + + # Calculate star direction vectors. + for i in range(0, num_entries): + vector = np.array([np.cos(star_table[i, 0])*np.cos(star_table[i, 1]), + np.sin(star_table[i, 0])*np.cos(star_table[i, 1]), + np.sin(star_table[i, 1])]) + star_table[i, 2:5] = vector + + # Insert all stars in a KD-tree for fast neighbour lookup + all_star_vectors = star_table[:, 2:5] + vector_kd_tree = KDTree(all_star_vectors) + + # Calculate set of FOV scales to create patterns at + fov_ratio = max_fov/min_fov + def logk(x, k): + return np.log(x) / np.log(k) + fov_divisions = np.ceil(logk(fov_ratio, multiscale_step)).astype(int) + 1 + if fov_ratio < np.sqrt(multiscale_step): + pattern_fovs = [max_fov] + else: + pattern_fovs = np.exp2(np.linspace(np.log2(min_fov), np.log2(max_fov), fov_divisions)) + self._logger.info('Generating patterns at FOV scales: ' + str(np.rad2deg(pattern_fovs))) + + # Theory of operation: + # + # We want our 4-star patterns to satisfy three criteria: be well distributed over the + # sky; favor bright stars; and have size commensurate with the FOV. + # + # Well distributed: we establish this by creating a set of FOV-sized "lattice fields" + # uniformly distributed over the celestial sphere. Within each lattice field we generate a + # fixed number (`patterns_per_lattice_field`, typically 50) of patterns, thus ensuring that + # all parts of the sky have the same density of database patterns. Because a solve-time + # field of view might not line up with a lattice field, a `lattice_field_oversampling` + # parameter (typically 100) is used to increase the number of lattice fields, overlapping + # them. + # + # Favor bright stars: within each lattice field, nearly all (see below) sky catalog stars in + # the lattice field are considered. Working with the brightest stars first, we form the + # desired number (`patterns_per_lattice_field`) of 4-star subsets within the field. + # + # Sized for FOV: in each lattice field, we form patterns using stars within FOV/2 radius of + # the field's center, thus ensuring that the resulting patterns will not be too large for + # the FOV. Because we work with the brightest stars first, most of the time patterns won't + # be too small for the FOV because brighter stars occur less frequently and are thus further + # apart on average. + # + # In the previous paragraph we appeal to the power law spatial distrbution of stars by + # brightness, so usually if we choose the brightest stars in a lattice field, the resulting + # patterns won't be tiny (because bright stars are spaced apart on average). However, + # clusters of bright stars do occur and if we aren't careful we could end up using up most + # of the `patterns_per_lattice_field` budget generating tiny patterns among the brightest + # cluster stars. 
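To see the multiscale FOV ladder calculation above with concrete numbers, here is the same arithmetic as a standalone snippet (a sketch only), using the 30/10-degree values from the docstring example and the default `multiscale_step=1.5`.

```python
import numpy as np

max_fov, min_fov, multiscale_step = np.deg2rad(30.0), np.deg2rad(10.0), 1.5

fov_ratio = max_fov / min_fov
# Number of geometrically spaced FOV scales between min_fov and max_fov.
fov_divisions = int(np.ceil(np.log(fov_ratio) / np.log(multiscale_step))) + 1
if fov_ratio < np.sqrt(multiscale_step):
    pattern_fovs = [max_fov]     # single-scale database
else:
    pattern_fovs = np.exp2(np.linspace(np.log2(min_fov), np.log2(max_fov), fov_divisions))

print(np.rad2deg(pattern_fovs))  # approx. [10.  14.42  20.8  30.] degrees for this input
```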
+ # + # Consider M45 (Pleiades). Within its roughly one degree diameter core, it has more than ten + # bright stars. 10 choose 4 is 210, so if patterns_per_lattice_field=50, we will generate + # all the patterns from the brightest Pleiades' stars. If the FOV is 10 degrees, these patterns + # will be of limited utility for plate solving because they are all very small relative to + # the FOV. + # + # We address this problem by applying a `pattern_stars_separation` constraint to the sky + # catalog stars before choosing a lattice field's pattern stars. In our 10 degree FOV + # example, a pattern_stars_separation of 1/2 degree creates an "exclusion zone" around each + # of the Pleiades brightest stars, leaving us with only the 5 or 6 most separated bright + # Pleiades stars. 6 choose 4 is just 15, so if `patterns_per_lattice_field` is larger than + # this (50 is typical), we'll generate plenty of patterns that include stars other than only + # the Pleiades members. + # + # A similar "cluster buster" step is performed at solve time, eliminating centroids that + # are too closely spaced. + + # Set of deduped patterns found, to be populated across all FOVs. + pattern_list = set() + for pattern_fov in reversed(pattern_fovs): + keep_for_patterns_at_fov = np.full(num_entries, False) + if fov_divisions == 1: + # Single scale database, trim to min_fov, make patterns up to max_fov + pattern_stars_separation = separation_for_density( + min_fov, verification_stars_per_fov) + else: + # Multiscale database, trim and make patterns iteratively at smaller FOVs + pattern_stars_separation = separation_for_density( + pattern_fov, verification_stars_per_fov) + self._logger.info('At FOV %s separate pattern stars by %.2f deg.' % + (round(np.rad2deg(pattern_fov), 5), + np.rad2deg(pattern_stars_separation))) + pattern_stars_dist = _distance_from_angle(pattern_stars_separation) + + # Loop through all stars in database, gather pattern stars for this FOV. + for star_ind in range(num_entries): + vector = all_star_vectors[star_ind, :] + # Check if any kept pattern stars are within the separation. + within_pattern_separation = vector_kd_tree.query_ball_point( + vector, pattern_stars_dist) + occupied_for_pattern = np.any(keep_for_patterns_at_fov[within_pattern_separation]) + # If there isn't a pattern star too close, add this to the pattern table. + if not occupied_for_pattern: + keep_for_patterns_at_fov[star_ind] = True + self._logger.info('Pattern stars at this FOV: %s.' % np.sum(keep_for_patterns_at_fov)) + + # Clip out tables of the kept stars. + pattern_star_table = star_table[keep_for_patterns_at_fov, :] + + # Insert pattern stars into KD tree for lattice field lookup. + pattern_kd_tree = KDTree(pattern_star_table[:, 2:5]) + + # Index conversion from pattern star_table to main star_table + pattern_index = np.nonzero(keep_for_patterns_at_fov)[0].tolist() + + # To ensure good coverage of patterns for the largest FOV of interest, you can just + # specify a somewhat larger `max_fov`. + fov_angle = pattern_fov / 2 + fov_dist = _distance_from_angle(fov_angle) + + # Enumerate all lattice fields over the celestial sphere. + total_field_pattern_stars = 0 + total_added_patterns = 0 + total_pattern_avg_mag = 0 + max_pattern_mag = -1 + min_stars_per_lattice_field = len(pattern_star_table) # Exceeds any possible value. + num_lattice_fields = 0 + n = num_fields_for_sky(pattern_fov) * lattice_field_oversampling + for lattice_field_center_vector in fibonacci_sphere_lattice(n): + # Find all pattern stars within lattice field. 
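The "exclusion zone" thinning described above is a greedy pass over stars in brightness order: keep a star only if no already-kept star lies within the separation distance. A self-contained sketch (not tetra3's code; it takes the chord-distance threshold directly instead of deriving it from `separation_for_density`):

```python
import numpy as np
from scipy.spatial import KDTree

def thin_by_separation(unit_vectors, min_chord_distance):
    """Greedy 'cluster buster': keep a vector only if no previously kept vector
    lies within min_chord_distance. Input must be ordered brightest first."""
    kept = np.full(len(unit_vectors), False)
    tree = KDTree(unit_vectors)
    for ind, vector in enumerate(unit_vectors):
        neighbours = tree.query_ball_point(vector, min_chord_distance)
        if not np.any(kept[neighbours]):
            kept[ind] = True
    return kept

# Toy usage: three nearly coincident directions plus one far away; two survive.
vecs = np.array([[1, 0, 0], [0.9999, 0.01, 0], [0.9999, 0, 0.01], [0, 1, 0]], dtype=float)
vecs /= np.linalg.norm(vecs, axis=1, keepdims=True)
print(thin_by_separation(vecs, min_chord_distance=0.05))  # [ True False False  True]
```

The same kind of pass is applied to the image centroids at solve time, as noted above.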
+ field_pattern_stars = pattern_kd_tree.query_ball_point(lattice_field_center_vector, fov_dist) + min_stars_per_lattice_field = min(len(field_pattern_stars), min_stars_per_lattice_field) + total_field_pattern_stars += len(field_pattern_stars) + num_lattice_fields += 1 + # Change to main star_table indices. + field_pattern_stars = [pattern_index[n] for n in field_pattern_stars] + field_pattern_stars.sort() # Brightness order. + + # Check all possible patterns in overall brightness order until we've accepted + # 'patterns_per_lattice_field' patterns. + patterns_this_lattice_field = 0 + for pattern in breadth_first_combinations(field_pattern_stars, PATTERN_SIZE): + len_before = len(pattern_list) + pattern_list.add(tuple(pattern)) # Add to set, deduping. + if len(pattern_list) > len_before: + total_added_patterns += 1 + total_mag = sum(star_table[p, 5] for p in pattern) + total_pattern_avg_mag += total_mag / PATTERN_SIZE + max_pattern_mag = max(star_table[pattern[-1], 5], max_pattern_mag) + if len(pattern_list) % 100000 == 0: + self._logger.info('Generated %s patterns so far.' % len(pattern_list)) + + patterns_this_lattice_field += 1 + if patterns_this_lattice_field >= patterns_per_lattice_field: + break + + self._logger.info( + 'avg/min pattern stars per lattice field %.2f/%d; avg/max pattern mag %.2f/%.2f' % + (total_field_pattern_stars / num_lattice_fields, + min_stars_per_lattice_field, + total_pattern_avg_mag / total_added_patterns, + max_pattern_mag)) + + pattern_list = list(pattern_list) + self._logger.info('Found %s patterns in total.' % len(pattern_list)) + + # Don't need this anymore. + del pattern_kd_tree + + # Create all pattern keys by calculating, sorting, and binning edge ratios; then compute + # a table index hash from the pattern key, and store the table index -> pattern mapping. + self._logger.info('Start building catalogue.') + if linear_probe: + catalog_length = int(_next_prime(3 * len(pattern_list))) + else: + catalog_length = int(_next_prime(2 * len(pattern_list))) + # Determine type to make sure the biggest index will fit, create pattern catalogue + max_index = np.max(np.array(pattern_list)) + if max_index <= np.iinfo('uint8').max: + pattern_catalog = np.zeros((catalog_length, PATTERN_SIZE), dtype=np.uint8) + elif max_index <= np.iinfo('uint16').max: + pattern_catalog = np.zeros((catalog_length, PATTERN_SIZE), dtype=np.uint16) + else: + pattern_catalog = np.zeros((catalog_length, PATTERN_SIZE), dtype=np.uint32) + self._logger.info('Catalog size %s and type %s.' % + (pattern_catalog.shape, pattern_catalog.dtype)) + + pattern_largest_edge = np.zeros(catalog_length, dtype=np.float16) + pattern_key_hashes = np.zeros(catalog_length, dtype=np.uint16) + + # Gather collision information. + pattern_keys_seen = set() + pattern_key_collisions = 0 + + # Go through each pattern and insert to the catalogue + for (pat_index, pattern) in enumerate(pattern_list): + if pat_index % 100000 == 0 and pat_index > 0: + self._logger.info('Inserting pattern number: ' + str(pat_index)) + + # retrieve the vectors of the stars in the pattern + vectors = [star_table[p, 2:5].tolist() for p in pattern] + + edge_angles = [2.0 * math.asin(0.5 * math.dist(vectors[i], vectors[j])) + for i, j in itertools.combinations(range(4), 2)] + edge_angles_sorted = sorted(edge_angles) + largest_angle = edge_angles_sorted[-1] + edge_ratios = [angle / largest_angle for angle in edge_angles_sorted[:-1]] + + # Convert edge ratio float to pattern key by binning. 
+ pattern_key = [int(ratio * pattern_bins) for ratio in edge_ratios] + pattern_key_hash = _compute_pattern_key_hash(pattern_key, pattern_bins) + hash_index = _pattern_key_hash_to_index( + pattern_key_hash, catalog_length, linear_probe) + + if EVALUATE_COLLISIONS: + prev_len = len(pattern_keys_seen) + pattern_keys_seen.add(tuple(pattern_key)) + if prev_len == len(pattern_keys_seen): + pattern_key_collisions += 1 + + # Presort patterns. + # Find the centroid, or average position, of the star pattern. + pattern_centroid = list(map(lambda a : sum(a) / len(a), zip(*vectors))) + + # Calculate each star's radius, or Euclidean distance from the centroid. + + # Elements: (distance, index in pattern). + centroid_distances = [ + (sum((x1 - x2) * (x1 - x2) for (x1, x2) in zip(v, pattern_centroid)), index) + for index, v in enumerate(vectors)] + centroid_distances.sort() + # Use the radii to uniquely order the pattern, used for future matching. + pattern = [pattern[i] for (_, i) in centroid_distances] + + index = _insert_at_index(pattern, hash_index, pattern_catalog, linear_probe) + pattern_key_hashes[index] = np.uint16(int(pattern_key_hash) & 0xffff) + # Store as milliradian to better use float16 range. + pattern_largest_edge[index] = largest_angle*1000 + + total_probes = 0 + max_probes = 0 + if EVALUATE_COLLISIONS: + # Evaluate average hash table probe count. + for pattern_key in pattern_keys_seen: + pattern_key_hash = _compute_pattern_key_hash( + pattern_key, pattern_bins) + hash_index = _pattern_key_hash_to_index( + pattern_key_hash, catalog_length, linear_probe) + hash_match_inds = _get_table_indices_from_hash( + hash_index, pattern_catalog, linear_probe) + probes = len(hash_match_inds) + total_probes += probes + if probes > max_probes: + max_probes = probes + + self._logger.info('Finished generating database.') + self._logger.info('Size of uncompressed star table: %i Bytes.' %star_table.nbytes) + self._logger.info('Size of uncompressed pattern catalog: %i Bytes.' 
%pattern_catalog.nbytes) + if EVALUATE_COLLISIONS: + self._logger.info('Pattern key collisions: %s; average/max hash table probe len: %.2f/%d' + % (pattern_key_collisions, + total_probes / len(pattern_keys_seen), + max_probes)) + self._star_table = star_table + self._star_kd_tree = vector_kd_tree + self._star_catalog_IDs = star_catID + self._pattern_catalog = pattern_catalog + self._pattern_largest_edge = pattern_largest_edge + self._pattern_key_hashes = pattern_key_hashes + self._db_props['pattern_mode'] = 'edge_ratio' + self._db_props['hash_table_type'] = 'linear_probe' if linear_probe else 'quadratic_probe' + self._db_props['pattern_size'] = PATTERN_SIZE + self._db_props['pattern_bins'] = pattern_bins + self._db_props['pattern_max_error'] = pattern_max_error + self._db_props['max_fov'] = np.rad2deg(max_fov) + self._db_props['min_fov'] = np.rad2deg(min_fov) + self._db_props['star_catalog'] = star_catalog + self._db_props['epoch_equinox'] = epoch_equinox + self._db_props['epoch_proper_motion'] = epoch_proper_motion + self._db_props['lattice_field_oversampling'] = lattice_field_oversampling + self._db_props['anchor_stars_per_fov'] = lattice_field_oversampling # legacy + self._db_props['pattern_stars_per_fov'] = lattice_field_oversampling # legacy + self._db_props['patterns_per_lattice_field'] = patterns_per_lattice_field + self._db_props['patterns_per_anchor_star'] = patterns_per_lattice_field # legacy + self._db_props['verification_stars_per_fov'] = verification_stars_per_fov + self._db_props['star_max_magnitude'] = star_max_magnitude + self._db_props['simplify_pattern'] = True # legacy + self._db_props['range_ra'] = None + self._db_props['range_dec'] = None + self._db_props['presort_patterns'] = True # legacy + self._db_props['num_patterns'] = len(pattern_list) + self._logger.debug(self._db_props) + + if save_as is not None: + self._logger.debug('Saving generated database as: ' + str(save_as)) + self.save_database(save_as) + else: + self._logger.info('Skipping database file generation.') + + def solve_from_image(self, image, fov_estimate=None, fov_max_error=None, + match_radius=.01, match_threshold=1e-5, + solve_timeout=5000, target_pixel=None, target_sky_coord=None, distortion=0, + return_matches=False, return_visual=False, match_max_error=.002, + pattern_checking_stars=None, **kwargs): + """Solve for the sky location of an image. + + Star locations (centroids) are found using :meth:`tetra3.get_centroids_from_image` and + keyword arguments are passed along to this method. Every 4-star combination of the + found stars found is checked against the database before giving up (or the `solve_timeout` + is reached). + + Example: + :: + + # Create dictionary with desired extraction settings + extract_dict = {'min_sum': 250, 'max_axis_ratio': 1.5} + # Solve for image + result = t3.solve_from_image(image, **extract_dict) + + Args: + image (PIL.Image): The image to solve for, must be convertible to numpy array. + fov_estimate (float, optional): Estimated horizontal field of view of the image in + degrees. + fov_max_error (float, optional): Maximum difference in field of view from the estimate + allowed for a match in degrees. + match_radius (float, optional): Maximum distance to a star to be considered a match + as a fraction of the image field of view. + match_threshold (float, optional): Maximum allowed false-positive probability to accept + a tested pattern a valid match. Default 1e-5. 
+ solve_timeout (float, optional): Timeout in milliseconds after which the solver will + give up on matching patterns. Defaults to 5000 (5 seconds). + target_pixel (numpy.ndarray, optional): Pixel coordinates to return RA/Dec for in + addition to the default (the centre of the image). Size (N,2) where each row is the + (y, x) coordinate measured from top left corner of the image. Defaults to None. + target_sky_coord (numpy.ndarray, optional): Sky coordinates to return image (y, x) for. + Size (N,2) where each row is the (RA, Dec) in degrees. Defaults to None. + distortion (float, optional): Set the estimated distortion of the image. + Negative distortion is barrel, positive is pincushion. Given as amount of distortion + at width/2 from centre. Can set to None to disable distortion calculation entirely. + Default 0. + return_matches (bool, optional): If set to True, the catalogue entries of the mached + stars and their pixel coordinates in the image is returned. + return_visual (bool, optional): If set to True, an image is returned that visualises + the solution. + match_max_error (float, optional): Maximum difference allowed in pattern for a match. + If None, uses the 'pattern_max_error' value from the database. + pattern_checking_stars: No longer meaningful, ignored. + **kwargs (optional): Other keyword arguments passed to + :meth:`tetra3.get_centroids_from_image`. + + Returns: + dict: A dictionary with the following keys is returned: + - 'RA': Right ascension of centre of image in degrees. + - 'Dec': Declination of centre of image in degrees. + - 'Roll': Rotation in degrees of celestial north relative to image's "up" + direction (towards y=0). Zero when north and up coincide; a positive + roll angle means north is counter-clockwise from image "up". + - 'FOV': Calculated horizontal field of view of the provided image. + - 'distortion': Calculated distortion of the provided image. Omitted if + the caller's distortion estimate is None. + - 'RMSE': RMS residual of matched stars in arcseconds. + - 'P90E': 90 percentile matched star residual in arcseconds. + - 'MAXE': Maximum matched star residual in arcseconds. + - 'Matches': Number of stars in the image matched to the database. + - 'Prob': Probability that the solution is a false-positive. + - 'epoch_equinox': The celestial RA/Dec equinox reference epoch. + - 'epoch_proper_motion': The epoch the database proper motions were propageted to. + - 'T_solve': Time spent searching for a match in milliseconds. + - 'T_extract': Time spent exctracting star centroids in milliseconds. + - 'RA_target': Right ascension in degrees of the pixel positions passed in + target_pixel. Not included if target_pixel=None (the default). + - 'Dec_target': Declination in degrees of the pixel positions in target_pixel. + Not included if target_pixel=None (the default). + - 'x_target': image x coordinates for the sky positions passed in target_sky_coord. + If a sky position is outside of the field of view, the corresponding x_target + entry will be None. Not included if target_sky_coord=None (the default). + - 'y_target': image y coordinates for the sky positions passed in target_sky_coord. + If a sky position is outside of the field of view, the corresponding y_target + entry will be None. Not included if target_sky_coord=None (the default). + - 'matched_stars': An Mx3 list with the (RA, Dec, magnitude) of the M matched stars + that were used in the solution. RA/Dec in degrees. Not included if + return_matches=False (the default). 
+ - 'matched_centroids': An Mx2 list with the (y, x) pixel coordinates in the image + corresponding to each matched star. Not included if return_matches=False. + - 'matched_catID': The catalogue ID corresponding to each matched star. See + Tetra3.star_catalog_IDs for information on the format. Not included if + return_matches=False. + - 'pattern_centroids': similar to matched_centroids, except just for the pattern + stars. Not included if return_matches=False. + - 'visual': A PIL image with spots for the given centroids in white, the coarse + FOV and distortion estimates in orange, the final FOV and distortion + estimates in green. Also has circles for the catalogue stars in green or + red for successful/unsuccessful match. Not included if return_visual=False. + - 'status': One of: + MATCH_FOUND: solution was obtained + NO_MATCH: no match was found after exhausting all possibilities + TIMEOUT: the 'solve_timeout' was reached before a match could be found + CANCELLED: the solve operation was cancelled before a match could be found + TOO_FEW: the 'image' has too few detected stars to attempt a pattern match + + If unsuccessful in finding a match, None is returned for all keys of the + dictionary except 'T_solve' and 'status', and the optional return keys are missing. + + """ + assert self.has_database, 'No database loaded' + self._logger.debug('Got solve from image with input: ' + str( + (image, fov_estimate, fov_max_error, match_radius, + match_threshold, solve_timeout, target_pixel, target_sky_coord, distortion, + return_matches, return_visual, match_max_error, kwargs))) + (width, height) = image.size[:2] + self._logger.debug('Image (height, width): ' + str((height, width))) + + # Run star extraction, passing kwargs along + t0_extract = precision_timestamp() + centr_data = get_centroids_from_image(image, **kwargs) + t_extract = (precision_timestamp() - t0_extract)*1000 + # If we get a tuple, need to use only first element and then reassemble at return + if isinstance(centr_data, tuple): + centroids = centr_data[0] + else: + centroids = centr_data + self._logger.debug('Found this many centroids, in time: ' + str((len(centroids), t_extract))) + # Run centroid solver, passing arguments along (could clean up with kwargs handler) + solution = self.solve_from_centroids( + centroids, (height, width), fov_estimate=fov_estimate, fov_max_error=fov_max_error, + match_radius=match_radius, match_threshold=match_threshold, + solve_timeout=solve_timeout, target_pixel=target_pixel, + target_sky_coord=target_sky_coord, distortion=distortion, + return_matches=return_matches, return_visual=return_visual, + match_max_error=match_max_error) + # Add extraction time to results and return + solution['T_extract'] = t_extract + if isinstance(centr_data, tuple): + return (solution,) + centr_data[1:] + else: + return solution + + def solve_from_centroids(self, star_centroids, size, fov_estimate=None, fov_max_error=None, + match_radius=.01, match_threshold=1e-5, + solve_timeout=5000, target_pixel=None, target_sky_coord=None, + distortion=0, return_matches=False, return_catalog=False, + return_visual=False, return_rotation_matrix=False, + match_max_error=.002, pattern_checking_stars=None): + """Solve for the sky location using a list of centroids. + + Use :meth:`tetra3.get_centroids_from_image` or your own centroiding algorithm to + find an array of all the stars in your image and pass this result along with the + resolution of the image to this method. 
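Before the centroid-level details continue below, here is a minimal end-to-end sketch of the higher-level `solve_from_image` entry point documented above (illustrative only; the file name, FOV and timeout are placeholders, and a pattern database must already be loaded):

```python
from PIL import Image
import tetra3

t3 = tetra3.Tetra3()                       # assumes the default database is available
img = Image.open('night_sky.png')          # placeholder path

result = t3.solve_from_image(img,
                             fov_estimate=12.0,   # degrees, if roughly known
                             fov_max_error=2.0,
                             solve_timeout=5000)  # milliseconds

if result['RA'] is not None:
    print('Centre RA/Dec/Roll: %.4f %.4f %.2f deg, FOV %.3f deg'
          % (result['RA'], result['Dec'], result['Roll'], result['FOV']))
else:
    print('No solution, status: %s' % result['status'])
```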
+ + Every 4-star combination of the `star_centroids` found is checked against the + database before giving up (or the `solve_timeout` is reached). Since patterns + contain four stars, there will be N choose 4 (potentially a very large number!) + patterns tested against the database, so it is important to specify a meaningful + `solve_timeout`. + + Passing an estimated FOV and error bounds yields solutions much faster that letting tetra3 + figure it out. + + Example: + :: + + # Get centroids from image with custom parameters + centroids = get_centroids_from_image(image, simga=2, filtsize=30) + # Solve from centroids + result = t3.solve_from_centroids(centroids, size=image.size, fov_estimate=13) + + Args: + star_centroids (numpy.ndarray): (N,2) list of centroids, ordered by brightest first. + Each row is the (y, x) position of the star measured from the top left corner. + size (tuple of floats): (height, width) of the centroid coordinate system (i.e. + image resolution). + fov_estimate (float, optional): Estimated horizontal field of view of the image in + degrees. Default None. + fov_max_error (float, optional): Maximum difference in field of view from the estimate + allowed for a match in degrees. Default None. + match_radius (float, optional): Maximum distance to a star to be considered a match + as a fraction of the image field of view. Default 0.01. + match_threshold (float, optional): Maximum allowed false-positive probability to accept + a tested pattern a valid match. Default 1e-5. + solve_timeout (float, optional): Timeout in milliseconds after which the solver will + give up on matching patterns. Defaults to 5000 (5 seconds). + target_pixel (numpy.ndarray, optional): Pixel coordinates to return RA/Dec for in + addition to the default (the centre of the image). Size (N,2) where each row is the + (y, x) coordinate measured from top left corner of the image. Defaults to None. + target_sky_coord (numpy.ndarray, optional): Sky coordinates to return image (y, x) for. + Size (N,2) where each row is the (RA, Dec) in degrees. Defaults to None. + distortion (float, optional): Set the estimated distortion of the image. + Negative distortion is barrel, positive is pincushion. Given as amount of distortion + at width/2 from centre. Can set to None to disable distortion calculation entirely. + Default 0. + return_matches (bool, optional): If set to True, the catalogue entries of the matched + stars and their pixel coordinates in the image is returned. + return_catalog (bool, optional): If set to True, information about catalog stars in + the image's FOV is returned. + return_visual (bool, optional): If set to True, an image is returned that visualises + the solution. + return_rotation_matrix (bool, optional): If True, the 3x3 rotation matrix is returned. + match_max_error (float, optional): Maximum difference allowed in pattern for a match. + If None, uses the 'pattern_max_error' value from the database. + pattern_checking_stars: No longer meaningful, ignored. + + Returns: + dict: A dictionary with the following keys is returned: + - 'RA': Right ascension of centre of image in degrees. + - 'Dec': Declination of centre of image in degrees. + - 'Roll': Rotation in degrees of celestial north relative to image's "up" + direction (towards y=0). Zero when north and up coincide; a positive + roll angle means north is counter-clockwise from image "up". + - 'FOV': Calculated horizontal field of view of the provided image. + - 'distortion': Calculated distortion of the provided image. 
Omitted if + the caller's distortion estimate is None. + - 'RMSE': RMS residual of matched stars in arcseconds. + - 'P90E': 90 percentile matched star residual in arcseconds. + - 'MAXE': Maximum matched star residual in arcseconds. + - 'Matches': Number of stars in the image matched to the database. + - 'Prob': Probability that the solution is a false-positive. + - 'epoch_equinox': The celestial RA/Dec equinox reference epoch. + - 'epoch_proper_motion': The epoch the database proper motions were propageted to. + - 'T_solve': Time spent searching for a match in milliseconds. + - 'RA_target': Right ascension in degrees of the pixel positions passed in + target_pixel. Not included if target_pixel=None (the default). If a Kx2 array + of target_pixel was passed, this will be a length K list. + - 'Dec_target': Declination in degrees of the pixel positions in target_pixel. + Not included if target_pixel=None (the default). If a Kx2 array + of target_pixel was passed, this will be a length K list. + - 'x_target': image x coordinates for the sky positions passed in target_sky_coord. + If a sky position is outside of the field of view, the corresponding x_target + entry will be None. Not included if target_sky_coord=None (the default). + - 'y_target': image y coordinates for the sky positions passed in target_sky_coord. + If a sky position is outside of the field of view, the corresponding y_target + entry will be None. Not included if target_sky_coord=None (the default). + - 'matched_stars': An Mx3 list with the (RA, Dec, magnitude) of the M matched stars + that were used in the solution. RA/Dec in degrees. Not included if + return_matches=False (the default). + - 'matched_centroids': An Mx2 list with the (y, x) pixel coordinates in the image + corresponding to each matched star. Not included if return_matches=False. + - 'matched_catID': The catalogue ID corresponding to each matched star. See + Tetra3.star_catalog_IDs for information on the format. Not included if + return_matches=False. + - 'catalog_stars': A list of tuples (RA, Dec, magnitude, y, x). RA/Dec in degrees. + Not included if return_catalog=False. + - 'pattern_centroids': similar to matched_centroids, except just for the pattern + stars. Not included if return_matches=False. + - 'visual': A PIL image with spots for the given centroids in white, the coarse + FOV and distortion estimates in orange, the final FOV and distortion + estimates in green. Also has circles for the catalogue stars in green or + red for successful/unsuccessful match. Not included if return_visual=False. + - 'rotation_matrix' 3x3 rotation matrix. Not included if + return_rotation_matrix=False. + - 'status': One of: + MATCH_FOUND: solution was obtained + NO_MATCH: no match was found after exhausting all possibilities + TIMEOUT: the 'solve_timeout' was reached before a match could be found + CANCELLED: the solve operation was cancelled before a match could be found + TOO_FEW: the 'image' has too few detected stars to attempt a pattern match + + If unsuccessful in finding a match, None is returned for all keys of the + dictionary except 'T_solve' and 'status', and the optional return keys are missing. 
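To illustrate the optional per-target outputs listed above, a short sketch follows (placeholder centroids and image size; the sky coordinate is roughly Polaris):

```python
import numpy as np
import tetra3

t3 = tetra3.Tetra3()
# Placeholder (N, 2) centroid array, (y, x) in pixels, brightest first; normally
# this comes from tetra3.get_centroids_from_image() or your own centroider.
centroids = np.array([[480.0, 640.0], [100.0, 200.0], [700.0, 300.0], [50.0, 900.0]])

result = t3.solve_from_centroids(
    centroids, size=(960, 1280),
    fov_estimate=12.0,
    target_pixel=np.array([[480, 640], [100, 200]]),  # (y, x) -> 'RA_target'/'Dec_target'
    target_sky_coord=np.array([[37.95, 89.26]]),      # (RA, Dec) -> 'y_target'/'x_target'
    return_matches=True)

if result['RA'] is not None:
    print(result['RA_target'], result['Dec_target'])  # one entry per target_pixel row
    print(result['y_target'], result['x_target'])     # None if outside the field of view
    print(len(result['matched_centroids']), 'stars matched')
```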
+ + """ + assert self.has_database, 'No database loaded' + t0_solve = precision_timestamp() + self._logger.debug('Got solve from centroids with input: ' + + str((len(star_centroids), size, fov_estimate, fov_max_error, + match_radius, match_threshold, + solve_timeout, target_pixel, target_sky_coord, distortion, + return_matches, return_catalog, return_visual, match_max_error))) + if fov_estimate is None: + # If no FOV given at all, guess middle of the range for a start + fov_initial = np.deg2rad((self._db_props['max_fov'] + self._db_props['min_fov'])/2) + else: + fov_estimate = np.deg2rad(float(fov_estimate)) + fov_initial = fov_estimate + if fov_max_error is not None: + fov_max_error = np.deg2rad(float(fov_max_error)) + match_radius = float(match_radius) + match_threshold = float(match_threshold) / self.num_patterns + self._logger.debug('Set threshold to: ' + str(match_threshold) + ', have ' + + str(self.num_patterns) + ' patterns.') + if solve_timeout is not None: + # Convert to seconds to match timestamp + solve_timeout = float(solve_timeout) / 1000 + if target_pixel is not None: + target_pixel = np.array(target_pixel) + if target_pixel.ndim == 1: + # Make shape (2,) array to (1,2), to match (N,2) pattern + target_pixel = target_pixel[None, :] + if target_sky_coord is not None: + target_sky_coord = np.array(target_sky_coord) + if target_sky_coord.ndim == 1: + # Make shape (2,) array to (1,2), to match (N,2) pattern + target_sky_coord = target_sky_coord[None, :] + return_matches = bool(return_matches) + return_catalog = bool(return_catalog) + + # extract height (y) and width (x) of image + (height, width) = size[:2] + # Extract relevant database properties + verification_stars_per_fov = self._db_props['verification_stars_per_fov'] + p_size = self._db_props['pattern_size'] + p_bins = self._db_props['pattern_bins'] + if match_max_error is None or match_max_error < self._db_props['pattern_max_error']: + match_max_error = self._db_props['pattern_max_error'] + p_max_err = match_max_error + presorted = self._db_props['presort_patterns'] + linear_probe = self._db_props['hash_table_type'] == 'linear_probe' + + # Indices to extract from dot product matrix (above diagonal) + upper_tri_index = np.triu_indices(p_size, 1) + + num_centroids = len(star_centroids) + image_centroids = np.asarray(star_centroids) + if num_centroids < p_size: + return {'RA': None, 'Dec': None, 'Roll': None, 'FOV': None, 'distortion': None, + 'RMSE': None, 'P90E': None, 'MAXE': None, 'Matches': None, 'Prob': None, + 'epoch_equinox': None, 'epoch_proper_motion': None, 'T_solve': 0, + 'status': TOO_FEW} + + # Apply the same "cluster buster" thinning strategy as is used in database + # construction. + pattern_stars_separation_pixels = width * separation_for_density( + fov_initial, verification_stars_per_fov) / fov_initial + keep_for_patterns = np.full(num_centroids, False) + centroids_kd_tree = KDTree(image_centroids) + for ind in range(num_centroids): + centroid = image_centroids[ind, :] + within_separation = centroids_kd_tree.query_ball_point( + centroid, pattern_stars_separation_pixels) + occupied = np.any(keep_for_patterns[within_separation]) + # If there isn't a pattern star too close, add this to the pattern table. 
+ if not occupied: + keep_for_patterns[ind] = True + pattern_centroids_inds = np.nonzero(keep_for_patterns)[0] + num_pattern_centroids = len(pattern_centroids_inds) + if num_pattern_centroids < num_centroids: + self._logger.debug('Trimmed %d pattern centroids to %d' % + (num_centroids, num_pattern_centroids)) + + if num_centroids > verification_stars_per_fov: + image_centroids = image_centroids[:verification_stars_per_fov, :] + self._logger.debug('Trimmed %d match centroids to %d' % + (num_centroids, len(image_centroids))) + num_centroids = len(image_centroids) + + if isinstance(distortion, (list, tuple)): + self._logger.warning('Tuple distortion %s no longer supported, ignoring' % distortion) + distortion = None + elif distortion is not None and not isinstance(distortion, Number): + self._logger.warning('Non-numeric distortion %s given, ignoring' % distortion) + distortion = None + + if distortion is None: + image_centroids_undist = image_centroids + else: + # If caller-estimated distortion, undistort centroids, then proceed as normal + image_centroids_undist = _undistort_centroids( + image_centroids, (height, width), k=distortion) + self._logger.debug('Undistorted centroids with k=%d' % distortion) + + # Compute star vectors using an estimate for the field-of-view in the x dimension + image_centroids_vectors = _compute_vectors( + image_centroids_undist, (height, width), fov_initial) + + catalog_lookup_count = 0 + catalog_eval_count = 0 + image_patterns_evaluated = 0 + search_space_explored = 0 + + # Try all `p_size` star combinations chosen from the image centroids, brightest first. + self._logger.debug('Checking up to %d image patterns from %d pattern centroids.' % + (math.comb(num_pattern_centroids, p_size), num_pattern_centroids)) + status = NO_MATCH + for image_pattern_indices in breadth_first_combinations(pattern_centroids_inds, p_size): + # Check if timeout has elapsed, then we must give up + if solve_timeout is not None: + elapsed_time = precision_timestamp() - t0_solve + if elapsed_time > solve_timeout: + self._logger.debug('Timeout reached after: %.2f sec.' % elapsed_time) + status = TIMEOUT + break + if self._cancelled: + elapsed_time = precision_timestamp() - t0_solve + self._logger.debug('Cancelled after: %.3f sec.' % elapsed_time) + status = CANCELLED + self._cancelled = False + break + + # Set largest distance to None, this is cached to avoid recalculating in future + # FOV estimation. + image_pattern_largest_distance = None + + image_pattern_vectors = image_centroids_vectors[image_pattern_indices, :] + # Calculate what the edge ratios are and broaden by p_max_err tolerance + edge_angles_sorted = np.sort(_angle_from_distance(pdist(image_pattern_vectors))) + image_pattern_largest_edge = edge_angles_sorted[-1] + image_pattern = edge_angles_sorted[:-1] / image_pattern_largest_edge + image_pattern_edge_ratio_min = image_pattern - p_max_err + image_pattern_edge_ratio_max = image_pattern + p_max_err + image_pattern_key = (image_pattern*p_bins).astype(int) + + image_patterns_evaluated += 1 + + # Possible range of pattern keys we need to look up + pattern_key_space_min = np.maximum(0, image_pattern_edge_ratio_min*p_bins).astype(int) + pattern_key_space_max = np.minimum(p_bins, image_pattern_edge_ratio_max*p_bins).astype(int) + # Make a list of the low/high values in each binned edge ratio position. 
+ pattern_key_range = list(range(low, high + 1) for (low, high) in zip( + pattern_key_space_min, pattern_key_space_max)) + def dist(pattern_key): + return sum((a-b)*(a-b) for (a, b) in zip(pattern_key, image_pattern_key)) + + # Make a list of all pattern keys to explore; tag each with its distance from + # 'image_pattern_key' for sorting, so the first pattern key values we try are the + # ones closest to what we measured in the image to be solved. + pattern_key_list = list((dist(code), code) for code in itertools.product( + *pattern_key_range)) + pattern_key_list.sort() + + # Iterate over pattern keys, starting from 'image_pattern_key' and working + # our way outward. + for (_, pattern_key) in pattern_key_list: + search_space_explored += 1 + # Calculate corresponding hash index. + pattern_key_hash = _compute_pattern_key_hash(pattern_key, p_bins) + hash_index = _pattern_key_hash_to_index( + pattern_key_hash, self.pattern_catalog.shape[0], linear_probe) + + (catalog_pattern_edges, all_catalog_pattern_vectors) = \ + self._get_all_patterns_for_index( + pattern_key_hash, hash_index, upper_tri_index, + image_pattern_largest_edge, fov_estimate, + fov_max_error, linear_probe) + if catalog_pattern_edges is None: + continue + catalog_lookup_count += len(catalog_pattern_edges) + + all_catalog_largest_edges = catalog_pattern_edges[:, -1] + all_catalog_edge_ratios = (catalog_pattern_edges[:, :-1] / + all_catalog_largest_edges[:, None]) + + # Compare catalogue edge ratios to the min/max range from the image pattern. + valid_patterns = np.argwhere(np.all(np.logical_and( + image_pattern_edge_ratio_min < all_catalog_edge_ratios, + image_pattern_edge_ratio_max > all_catalog_edge_ratios), axis=1)).flatten() + + # Go through each matching pattern and calculate further + for index in valid_patterns: + catalog_eval_count += 1 + + # Compute the FOV that our image_pattern would yield if it were to + # match this pattern. 
+ catalog_largest_edge = all_catalog_largest_edges[index] + if fov_estimate is not None: + # Can quickly correct FOV by scaling given estimate + fov = catalog_largest_edge / image_pattern_largest_edge * fov_initial + else: + # Use camera projection to calculate coarse fov + # The FOV estimate will be the same for each attempt with this pattern + # so we can cache the value by checking if we have already set it + if image_pattern_largest_distance is None: + image_pattern_largest_distance = np.max( + pdist(image_centroids_undist[image_pattern_indices, :])) + f = image_pattern_largest_distance / 2 / np.tan(catalog_largest_edge/2) + fov = 2*np.arctan(width/2/f) + + # Recalculate vectors using coarse FOV and uniquely sort them by + # distance from centroid + image_pattern_vectors = _compute_vectors( + image_centroids_undist[image_pattern_indices, :], (height, width), fov) + # find the centroid, or average position, of the star pattern + pattern_centroid = np.mean(image_pattern_vectors, axis=0) + # calculate each star's radius, or Euclidean distance from the centroid + pattern_radii = cdist(image_pattern_vectors, pattern_centroid[None, :]).flatten() + # use the radii to uniquely order the pattern's star vectors so they can be + # matched with the catalog vectors + image_pattern_vectors = np.array(image_pattern_vectors)[np.argsort(pattern_radii)] + + # Now get pattern vectors from catalogue, and sort if necessary + catalog_pattern_vectors = all_catalog_pattern_vectors[index, :] + if not presorted: + # find the centroid, or average position, of the star pattern + catalog_centroid = np.mean(catalog_pattern_vectors, axis=0) + # calculate each star's radius, or Euclidean distance from the centroid + catalog_radii = cdist(catalog_pattern_vectors, + catalog_centroid[None, :]).flatten() + # use the radii to uniquely order the catalog vectors + catalog_pattern_vectors = catalog_pattern_vectors[np.argsort(catalog_radii)] + + # Use the pattern match to find an estimate for the image's rotation matrix + rotation_matrix = _find_rotation_matrix(image_pattern_vectors, + catalog_pattern_vectors) + if np.linalg.det(rotation_matrix) < 0: + # Reject false positive due to implausible rotation matrix. + continue + + # Find all catalog star vectors inside the (diagonal) field of view for + # matching, in catalog brightness order. + image_center_vector = rotation_matrix[0, :] + fov_diagonal_rad = fov * np.sqrt(width**2 + height**2) / width + nearby_cat_star_inds = self._get_nearby_catalog_stars( + image_center_vector, fov_diagonal_rad/2) + nearby_cat_star_vectors = self.star_table[nearby_cat_star_inds, 2:5] + + # Derotate nearby catalog stars and get their (undistorted) centroids using + # coarse fov. + nearby_cat_star_vectors_derot = np.dot(rotation_matrix, + nearby_cat_star_vectors.T).T + (nearby_cat_star_centroids, kept) = _compute_centroids( + nearby_cat_star_vectors_derot, (height, width), fov) + nearby_cat_star_centroids = nearby_cat_star_centroids[kept, :] + nearby_cat_star_vectors = nearby_cat_star_vectors[kept, :] + nearby_cat_star_inds = nearby_cat_star_inds[kept] + # Only keep as many nearby stars as the image centroids. The 2x "fudge factor" + # is because image centroids brightness rankings might not match the nearby star + # catalog brightness rankings, so keeping some extra nearby stars helps ensure + # more matches. 
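The coarse FOV estimate used above comes from simple pinhole-camera trigonometry; as a standalone worked example with made-up numbers (a sketch, not tetra3 code):

```python
import numpy as np

width = 1280                              # image width in pixels
catalog_largest_edge = np.deg2rad(8.0)    # largest pattern edge on the sky
image_pattern_largest_distance = 850.0    # the same edge measured in pixels

# Pinhole camera: the pattern's largest edge subtends catalog_largest_edge across
# image_pattern_largest_distance pixels, giving the focal length in pixels, from
# which the horizontal FOV follows.
f = image_pattern_largest_distance / 2 / np.tan(catalog_largest_edge / 2)
fov = 2 * np.arctan(width / 2 / f)
print('focal length %.1f px, FOV %.2f deg' % (f, np.rad2deg(fov)))
```

When a caller-supplied `fov_estimate` is available, the solver instead rescales it by the ratio of the catalogue edge to the measured image edge, as in the first branch above.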
+ nearby_cat_star_centroids = nearby_cat_star_centroids[:2*num_centroids] + nearby_cat_star_vectors = nearby_cat_star_vectors[:2*num_centroids] + nearby_cat_star_inds = nearby_cat_star_inds[:2*num_centroids] + num_nearby_catalog_stars = len(nearby_cat_star_centroids) + + # Match the image centroids to the nearby star centroids. + matched_stars = _find_centroid_matches( + image_centroids_undist, nearby_cat_star_centroids, width*match_radius) + num_extracted_stars = num_centroids + num_star_matches = len(matched_stars) + self._logger.debug("Number of nearby stars: %d, total matched: %d" \ + % (num_nearby_catalog_stars, num_star_matches)) + + # Probability that a single star is a mismatch (fraction of FOV area + # that are stars) + prob_single_star_mismatch = num_nearby_catalog_stars * match_radius**2 + # Probability that this rotation matrix's set of matches happen randomly + # we subtract two degrees of freedom + prob_mismatch = scipy.stats.binom.cdf(num_extracted_stars - (num_star_matches - 2), + num_extracted_stars, + 1 - prob_single_star_mismatch) + self._logger.debug("Mismatch probability = %.2e, at FOV = %.5fdeg" \ + % (prob_mismatch, np.rad2deg(fov))) + if prob_mismatch >= match_threshold: + continue + + # display mismatch probability in scientific notation + self._logger.debug("MATCH ACCEPTED") + self._logger.debug("Prob: %.4g, corr: %.4g" + % (prob_mismatch, prob_mismatch*self.num_patterns)) + + # Get the vectors for all matches in the image using coarse fov + matched_image_centroids_undist = image_centroids_undist[matched_stars[:, 0], :] + matched_image_vectors = _compute_vectors(matched_image_centroids_undist, + (height, width), fov) + matched_catalog_vectors = nearby_cat_star_vectors[matched_stars[:, 1], :] + # Recompute rotation matrix for more accuracy. The earlier rotation + # matrix was calculated using the pattern stars; the recomputed rotation + # matrix uses all star matches, not just the pattern stars. + rotation_matrix = _find_rotation_matrix(matched_image_vectors, + matched_catalog_vectors) + # Extract right ascension, declination, and roll from rotation matrix. 
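The false-positive test above can be reproduced in isolation. The sketch below plugs in made-up counts purely to show the scale of the numbers involved (it is not part of tetra3):

```python
import scipy.stats

num_nearby_catalog_stars = 40   # catalog stars projected into the FOV
num_extracted_stars = 25        # centroids kept from the image
num_star_matches = 18           # centroids matched within match_radius
match_radius = 0.01             # as a fraction of the FOV

# Chance that one centroid lands on some catalog star purely by accident.
prob_single_star_mismatch = num_nearby_catalog_stars * match_radius**2
# Probability of obtaining at least this many matches at random, with two
# degrees of freedom subtracted as in the solver above.
prob_mismatch = scipy.stats.binom.cdf(
    num_extracted_stars - (num_star_matches - 2),
    num_extracted_stars,
    1 - prob_single_star_mismatch)
print('false-positive probability: %.2e' % prob_mismatch)
```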
+ ra = np.rad2deg(np.arctan2(rotation_matrix[0, 1], + rotation_matrix[0, 0])) % 360 + dec = np.rad2deg(np.arctan2(rotation_matrix[0, 2], + norm(rotation_matrix[1:3, 2]))) + roll = np.rad2deg(np.arctan2(rotation_matrix[1, 2], + rotation_matrix[2, 2])) % 360 + + if distortion is None: + # Compare mutual angles in catalogue to those with current + # FOV estimate in order to scale accurately for fine FOV + angles_camera = _angle_from_distance(pdist(matched_image_vectors)) + angles_catalogue = _angle_from_distance(pdist(matched_catalog_vectors)) + fov *= np.mean(angles_catalogue / angles_camera) + k = None + else: + # Accurately calculate the FOV and distortion by looking at the angle + # from boresight on all matched catalogue vectors and all matched + # image centroids + matched_catalog_vectors_derot = np.dot( + rotation_matrix, matched_catalog_vectors.T).T + tangent_matched_catalog_vectors = norm( + matched_catalog_vectors_derot[:, 1:], axis=1) \ + /matched_catalog_vectors_derot[:, 0] + # Get the (distorted) pixel distance from image centre for all matches + # (scaled relative to width/2) + matched_image_centroids = image_centroids[matched_stars[:, 0], :] + radius_matched_image_centroids = norm(matched_image_centroids + - [height/2, width/2], axis=1)/width*2 + # Solve system of equations in RMS sense for focal length f and distortion k + # where f is focal length in units of image width/2 + # and k is distortion at width/2 (negative is barrel) + # undistorted = distorted*(1 - k*(distorted*2/width)^2) + A = np.hstack((tangent_matched_catalog_vectors[:, None], + radius_matched_image_centroids[:, None]**3)) + b = radius_matched_image_centroids[:, None] + (f, k) = lstsq(A, b, rcond=None)[0].flatten() + # Correct focal length to be at horizontal FOV + f = f/(1 - k) + self._logger.debug('Calculated focal length to %.2f and distortion to %.3f' % + (f, k)) + # Calculate (horizontal) true field of view + fov = 2*np.arctan(1/f) + # Re-undistort centroids using updated distortion for final calculations + image_centroids_undist = _undistort_centroids(image_centroids, + (height, width), k) + matched_image_centroids_undist = image_centroids_undist[ + matched_stars[:, 0], :] + + # Re-apply refined rotation matrix and FOV to nearby_cat_star_vectors. + nearby_cat_star_vectors_derot = np.dot(rotation_matrix, + nearby_cat_star_vectors.T).T + (nearby_cat_star_centroids, kept) = _compute_centroids( + nearby_cat_star_vectors_derot, (height, width), fov) + + # Get vectors + final_match_vectors = _compute_vectors( + matched_image_centroids_undist, (height, width), fov) + # Rotate to the sky + final_match_vectors = np.dot(rotation_matrix.T, final_match_vectors.T).T + + # Calculate residual angles between image vectors and catalog vectors. 
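The RA/Dec/roll extraction above reads the boresight from the first row of the rotation matrix and the roll from its last column; isolated as a helper, with a trivial identity-matrix check (a sketch under the same row convention):

```python
import numpy as np

def angles_from_rotation_matrix(R):
    """Recover (RA, Dec, Roll) in degrees from a rotation matrix whose first
    row is the boresight (image centre) direction."""
    R = np.asarray(R)
    ra = np.rad2deg(np.arctan2(R[0, 1], R[0, 0])) % 360
    dec = np.rad2deg(np.arctan2(R[0, 2], np.linalg.norm(R[1:3, 2])))
    roll = np.rad2deg(np.arctan2(R[1, 2], R[2, 2])) % 360
    return ra, dec, roll

# Identity matrix: boresight along +x (RA=0, Dec=0) with zero roll.
print(angles_from_rotation_matrix(np.eye(3)))  # (0.0, 0.0, 0.0)
```

This is the same matrix that `solve_from_centroids(..., return_rotation_matrix=True)` returns under the 'rotation_matrix' key.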
+ distance = norm(final_match_vectors - matched_catalog_vectors, axis=1) + distance.sort() + p90_index = int(0.9 * (len(distance)-1)) + p90_err_angle = np.rad2deg(_angle_from_distance(distance[p90_index])) * 3600 + max_err_angle = np.rad2deg(_angle_from_distance(distance[-1])) * 3600 + angle = _angle_from_distance(distance) + rms_err_angle = np.rad2deg(np.sqrt(np.mean(angle**2))) * 3600 + + # Solved in this time + t_solve = (precision_timestamp() - t0_solve)*1000 + solution_dict = {'RA': ra, 'Dec': dec, + 'Roll': roll, + 'FOV': np.rad2deg(fov), + 'distortion': k, + 'RMSE': rms_err_angle, + 'P90E': p90_err_angle, + 'MAXE': max_err_angle, + 'Matches': num_star_matches, + 'Prob': prob_mismatch*self.num_patterns, + 'epoch_equinox': self._db_props['epoch_equinox'], + 'epoch_proper_motion': self._db_props['epoch_proper_motion'], + 'T_solve': t_solve, + 'status': MATCH_FOUND} + + # If we were given target pixel(s), calculate their ra/dec + if target_pixel is not None: + self._logger.debug('Calculate RA/Dec for targets: %s' % target_pixel) + # Calculate the vector in the sky of the target pixel(s) + if k is not None: + target_pixel = _undistort_centroids(target_pixel, (height, width), k) + target_vector = _compute_vectors( + target_pixel, (height, width), fov) + rotated_target_vector = np.dot(rotation_matrix.T, target_vector.T).T + # Calculate and add RA/Dec to solution + target_ra = np.rad2deg(np.arctan2(rotated_target_vector[:, 1], + rotated_target_vector[:, 0])) % 360 + target_dec = 90 - np.rad2deg( + np.arccos(rotated_target_vector[:,2])) + + if target_ra.shape[0] > 1: + solution_dict['RA_target'] = target_ra.tolist() + solution_dict['Dec_target'] = target_dec.tolist() + else: + solution_dict['RA_target'] = target_ra[0] + solution_dict['Dec_target'] = target_dec[0] + + # If we were given target sky coord(s), calculate their image x/y if + # within FOV. + if target_sky_coord is not None: + self._logger.debug('Calculate y/x for sky targets: %s' % target_sky_coord) + target_sky_vectors = [] + for tsc in target_sky_coord: + ra = np.deg2rad(tsc[0]) + dec = np.deg2rad(tsc[1]) + target_sky_vectors.append([np.cos(ra) * np.cos(dec), + np.sin(ra) * np.cos(dec), + np.sin(dec)]) + target_sky_vectors = np.array(target_sky_vectors) + target_sky_vectors_derot = np.dot(rotation_matrix, target_sky_vectors.T).T + (target_centroids, kept) = _compute_centroids(target_sky_vectors_derot, + (height, width), fov) + if k is not None: + for ind in kept: + centroid = target_centroids[ind] + target_centroids[ind] = _distort_centroids( + [centroid], (height, width), k)[0] + target_y = [] + target_x = [] + for i in range(target_centroids.shape[0]): + if i in kept: + target_y.append(target_centroids[i][0]) + target_x.append(target_centroids[i][1]) + else: + target_y.append(None) + target_x.append(None) + if target_sky_coord.shape[0] > 1: + solution_dict['y_target'] = target_y + solution_dict['x_target'] = target_x + else: + solution_dict['y_target'] = target_y[0] + solution_dict['x_target'] = target_x[0] + + # If requested to return data about matches, append to dict + if return_matches: + match_data = self._get_matched_star_data( + image_centroids[matched_stars[:, 0]], + nearby_cat_star_inds[matched_stars[:, 1]]) + solution_dict.update(match_data) + + pattern_centroids = [] + for img_pat_ind in image_pattern_indices: + pattern_centroids.append(image_centroids[img_pat_ind]) + solution_dict.update({'pattern_centroids': pattern_centroids}) + + # If requested to return catalog stars in FOV, append to dict. 
+ if return_catalog: + catalog_tuples = [] + for (i, centroid) in enumerate(nearby_cat_star_centroids): + star_ind = nearby_cat_star_inds[i] + ra = np.rad2deg(self.star_table[star_ind, 0]) + dec = np.rad2deg(self.star_table[star_ind, 1]) + mag = self.star_table[star_ind, 5] + (y, x) = centroid + if k is not None: + dist_centroid = _distort_centroids([centroid], (height, width), k) + (y, x) = dist_centroid[0] + catalog_tuples.append( (ra, dec, mag, y, x) ) + solution_dict.update({'catalog_stars': catalog_tuples}) + + # If requested to create a visualisation, do so and append + if return_visual: + self._logger.debug('Generating visualisation') + img = Image.new('RGB', (width, height)) + img_draw = ImageDraw.Draw(img) + # Make list of matched and not from catalogue + matched = matched_stars[:, 1] + not_matched = np.array([True]*len(nearby_cat_star_centroids)) + not_matched[matched] = False + not_matched = np.flatnonzero(not_matched) + + def draw_circle(centre, radius, **kwargs): + bbox = [centre[1] - radius, + centre[0] - radius, + centre[1] + radius, + centre[0] + radius] + img_draw.ellipse(bbox, **kwargs) + + for cent in image_centroids: + # Centroids with no/given distortion + draw_circle(cent, 2, fill='white') + for cent in image_centroids_undist: + # Image centroids with coarse distortion for matching + draw_circle(cent, 1, fill='darkorange') + for cent in image_centroids_undist[image_pattern_indices, :]: + # Make the pattern ones larger + draw_circle(cent, 3, outline='darkorange') + for cent in matched_image_centroids_undist: + # Centroid position with solution distortion + draw_circle(cent, 1, fill='green') + for match in matched: + # Green circle for succeessful match + draw_circle(nearby_cat_star_centroids[match], + width*match_radius, outline='green') + for match in not_matched: + # Red circle for failed match + draw_circle(nearby_cat_star_centroids[match], + width*match_radius, outline='red') + + solution_dict['visual'] = img + + if return_rotation_matrix: + solution_dict['rotation_matrix'] = rotation_matrix.tolist() + + self._logger.debug(solution_dict) + self._logger.debug( + 'For %d centroids, evaluated %s image patterns; searched %s pattern keys' % + (num_centroids, + image_patterns_evaluated, + search_space_explored)) + self._logger.debug( + 'Looked up/evaluated %s/%s catalog patterns' % + (catalog_lookup_count, catalog_eval_count)) + return solution_dict + # Close of image_pattern_indices loop + + # Failed to solve (or timeout or cancel), get time and return None + t_solve = (precision_timestamp() - t0_solve) * 1000 + self._logger.debug('FAIL: Did not find a match to the stars! It took ' + + str(round(t_solve)) + ' ms.') + self._logger.debug( + 'FAIL: For %d centroids, evaluated %s image patterns; searched %s pattern keys' % + (num_centroids, + image_patterns_evaluated, + search_space_explored)) + self._logger.debug( + 'FAIL: Looked up/evaluated %s/%s catalog patterns' % + (catalog_lookup_count, catalog_eval_count)) + return {'RA': None, 'Dec': None, 'Roll': None, 'FOV': None, 'distortion': None, + 'RMSE': None, 'P90E': None, 'MAXE': None, 'Matches': None, 'Prob': None, + 'epoch_equinox': None, 'epoch_proper_motion': None, 'T_solve': t_solve, + 'status': status} + + def cancel_solve(self): + """Signal that a currently running solve_from_image() or solve_from_centroids() should + terminate immediately. + If no solve_from_{image,centroids} is running, this call affects the next solve attempt. 
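Since the solver checks the cancel flag inside its pattern loop, `cancel_solve()` is typically called from another thread, for example when a user aborts a long solve. A minimal threading sketch (placeholder centroids and image size):

```python
import threading
import numpy as np
import tetra3

t3 = tetra3.Tetra3()
centroids = np.random.uniform(0, 900, size=(20, 2))  # placeholder; use real centroids
results = {}

def solve_worker():
    results['solution'] = t3.solve_from_centroids(centroids, (960, 1280),
                                                  solve_timeout=60000)

worker = threading.Thread(target=solve_worker)
worker.start()
# ... some time later, e.g. the user presses "stop" ...
t3.cancel_solve()            # a running solve returns with status CANCELLED
worker.join()
print(results['solution']['status'])
```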
+ """ + self._logger.debug('cancelling') + self._cancelled = True + + def _get_all_patterns_for_index(self, pattern_key_hash, hash_index, upper_tri_index, + image_pattern_largest_edge, fov_estimate, fov_max_error, + linear_probe): + """Returns (edges, vectors) for all pattern table entries for `hash_index`.""" + + # Iterate over table hash indices. + hash_match_inds = _get_table_indices_from_hash( + hash_index, self.pattern_catalog, linear_probe) + if len(hash_match_inds) == 0: + return (None, None) + + if self.pattern_key_hashes is not None: + key_hash16 = np.uint16(int(pattern_key_hash) & 0xffff) + keep = self.pattern_key_hashes[hash_match_inds] == key_hash16 + hash_match_inds = hash_match_inds[keep] + if len(hash_match_inds) == 0: + return (None, None) + + if self.pattern_largest_edge is not None \ + and fov_estimate is not None \ + and fov_max_error is not None: + # Can immediately compare FOV to patterns to remove mismatches + largest_edge = self.pattern_largest_edge[hash_match_inds].astype(np.float32) + fov2 = largest_edge / image_pattern_largest_edge * fov_estimate / 1000 + keep = abs(fov2 - fov_estimate) < fov_max_error + hash_match_inds = hash_match_inds[keep] + if len(hash_match_inds) == 0: + return (None, None) + catalog_matches = self.pattern_catalog[hash_match_inds, :] + + # Get star vectors for all matching hashes + catalog_pattern_vectors = self.star_table[catalog_matches, 2:5] + # Calculate pattern by angles between vectors + # implement more accurate angle calculation + # this is a bit manual, I could not see a faster way + arr1 = np.take(catalog_pattern_vectors, upper_tri_index[0], axis=1) + arr2 = np.take(catalog_pattern_vectors, upper_tri_index[1], axis=1) + catalog_pattern_edges = np.sort(_angle_from_distance(norm(arr1 - arr2, axis=-1))) + + return (catalog_pattern_edges, catalog_pattern_vectors) + + def _get_nearby_catalog_stars(self, vector, radius): + """Get star indices within radius radians of the vector. Sorted brightest first.""" + max_dist = _distance_from_angle(radius) + nearby = self._star_kd_tree.query_ball_point(vector, max_dist) + return np.sort(nearby) + + def _get_matched_star_data(self, centroid_data, star_indices): + """Get dictionary of matched star data to return. 
+ + centroid_data: ndarray of centroid data Nx2, each row (y, x) + star_indices: ndarray of matching star indices len N + + return dict with keys: + - matched_centroids: Nx2 (y, x) in pixel coordinates, sorted by brightness + - matched_stars: Nx3 (ra (deg), dec (deg), magnitude) + - matched_catID: (N,) or (N, 3) with catalogue ID + """ + output = {} + output['matched_centroids'] = centroid_data.tolist() + stars = self.star_table[star_indices, :][:, [0, 1, 5]] + stars[:,:2] = np.rad2deg(stars[:,:2]) + output['matched_stars'] = stars.tolist() + if self.star_catalog_IDs is None: + output['matched_catID'] = None + elif len(self.star_catalog_IDs.shape) > 1: + # Have 2D array, pick rows + output['matched_catID'] = self.star_catalog_IDs[star_indices, :].tolist() + else: + # Have 1D array, pick indices + output['matched_catID'] = self.star_catalog_IDs[star_indices].tolist() + return output + + @staticmethod + def _build_catalog_path(star_catalog): + """ build the path to the star catalog and parse the catalog name + Args: + star_catalog (str or pathlib.Path, optional): the name or path to the star catalog file + Returns: + (tuple[str, pathlib.Path]): return the pure catalog name and the file path + """ + if star_catalog in _supported_databases: + # only name supplied, assume file is adjacent to this code file + catalog_file_full_pathname = _lib_root / star_catalog + else: + # a path string or path object supplied, parse out the pure name + catalog_file_full_pathname = Path(star_catalog).expanduser() + star_catalog = catalog_file_full_pathname.name.rstrip(catalog_file_full_pathname.suffix) + + if star_catalog not in _supported_databases: + raise ValueError( + f"star_catalog name must be one of {_supported_databases}, got: {star_catalog}") + + # Add .dat suffix for hip and tyc if not present + if star_catalog in ('hip_main', 'tyc_main') and not catalog_file_full_pathname.suffix: + catalog_file_full_pathname = catalog_file_full_pathname.with_suffix('.dat') + + if not catalog_file_full_pathname.exists(): + raise ValueError(f'No star catalogue found at {str(catalog_file_full_pathname)}') + + return star_catalog, catalog_file_full_pathname + +# celestial_coords: [[ra, dec], ...] in degrees +# returns: [[y, x], ...] +def transform_to_image_coords(celestial_coords, width, height, fov, + rotation_matrix, distortion): + rotation_matrix = np.array(rotation_matrix) + celestial_vectors = [] + for cc in celestial_coords: + ra = np.deg2rad(cc[0]) + dec = np.deg2rad(cc[1]) + celestial_vectors.append([np.cos(ra) * np.cos(dec), + np.sin(ra) * np.cos(dec), + np.sin(dec)]) + celestial_vectors = np.array(celestial_vectors) + celestial_vectors_derot = np.dot(rotation_matrix, celestial_vectors.T).T + (image_coords, kept) = _compute_centroids( + celestial_vectors_derot, (height, width), np.deg2rad(fov)) + image_coords = _distort_centroids(image_coords, (height, width), distortion) + result = [] + for i in range(image_coords.shape[0]): + if i in kept: + result.append(image_coords[i]) + return result + +# image_coords: [[y, x], ...] +# returns: [[ra, dec], ...] 
in degrees +def transform_to_celestial_coords(image_coords, width, height, fov, + rotation_matrix, distortion): + rotation_matrix = np.array(rotation_matrix) + + image_coords = np.array(image_coords) + image_coords = _undistort_centroids(image_coords, (height, width), distortion) + image_vectors = _compute_vectors(image_coords, (height, width), np.deg2rad(fov)) + rotated_image_vectors = np.dot(rotation_matrix.T, image_vectors.T).T + + # Calculate and add RA/Dec to solution + ra = np.rad2deg(np.arctan2(rotated_image_vectors[:, 1], + rotated_image_vectors[:, 0])) % 360 + dec = 90 - np.rad2deg(np.arccos(rotated_image_vectors[:,2])) + + celestial_vectors = [] + for i in range(len(ra)): + celestial_vectors.append((ra[i], dec[i])) + + return celestial_vectors + + +def get_centroids_from_image(image, sigma=2, image_th=None, crop=None, downsample=None, + filtsize=25, bg_sub_mode='local_mean', sigma_mode='global_root_square', + binary_open=True, centroid_window=None, max_area=100, min_area=5, + max_sum=None, min_sum=None, max_axis_ratio=None, max_returned=None, + return_moments=False, return_images=False): + """Extract spot centroids from an image and calculate statistics. + + This is a versatile function for finding spots (e.g. stars or satellites) in an image and + calculating/filtering their positions (centroids) and statistics (e.g. sum, area, shape). + + The coordinates start at the top/left edge of the pixel, i.e. x=y=0.5 is the centre of the + top-left pixel. To convert the results to integer pixel indices use the floor operator. + + To aid in finding optimal settings pass `return_images=True` to get back a dictionary with + partial extraction results and tweak the parameters accordingly. The dictionary entry + `binary_mask` shows the result of the raw star detection and `final_centroids` labels the + centroids in the original image (green for accepted, red for rejected). + + Technically, the best extraction is attained with `bg_sub_mode='local_median'` and + `sigma_mode='local_median_abs'` with a reasonable (e.g. 15) size filter and a very sharp image. + However, this may be slow (especially for larger filter sizes) and requires that the camera + readout bit-depth is sufficient to accurately capture the camera noise. A recommendable and + much faster alternative is `bg_sub_mode='local_mean'` and `sigma_mode='global_root_square'` + with a larger (e.g. 25 or more) sized filter, which is the default. You may elect to do + background subtraction and image thresholding by your own methods, then pass `bg_sub_mode=None` + and your threshold as `image_th` to bypass these extraction steps. + + The algorithm proceeds as follows: + 1. Convert image to 2D numpy.ndarray with type float32. + 2. Call :meth:`tetra3.crop_and_downsample_image` with the image and supplied arguments + `crop` and `downsample`. + 3. Subtract the background if `bg_sub_mode` is not None. Four methods are available: + + - 'local_median': Create the background image using a median filter of + size `filtsize` and subtract pixelwise. + - 'local_mean' (the default): Create the background image using a mean filter of size + `filtsize` and subtract pixelwise. + - 'global_median': Subtract the median value of all pixels from each pixel. + - 'global_mean': Subtract the mean value of all pixels from each pixel. + + 4. Calculate the image threshold if image_th is None. If image_th is defined this value + will be used to threshold the image. 
The threshold is determined by calculating the
+        noise standard deviation with the method selected as `sigma_mode` and then scaling it by
+        `sigma` (default 2). The available methods are:
+
+        - 'local_median_abs': For each pixel, calculate the standard deviation as
+          the median of the absolute values in a region of size `filtsize` and scale by 1.48.
+        - 'local_root_square': For each pixel, calculate the standard deviation as the square
+          root of the mean of the square values in a region of size `filtsize`.
+        - 'global_median_abs': Use the median of the absolute value of all pixels scaled by 1.48
+          as the standard deviation.
+        - 'global_root_square' (the default): Use the square root of the mean of the square of
+          all pixels as the standard deviation.
+
+    5. Create a binary mask using the image threshold. If `binary_open=True` (the default)
+       apply a binary opening operation with a 3x3 cross as structuring element to clean up the
+       mask.
+    6. Label all regions (spots) in the binary mask.
+    7. Calculate statistics on each region and reject it if it fails any of the max or min
+       values passed. Calculated statistics are: area, sum, centroid (first moments) in x and
+       y, second moments in xx, yy, and xy, major over minor axis ratio.
+    8. Sort the regions, largest sum first, and keep at most `max_returned` if not None.
+    9. If `centroid_window` is not None, recalculate the statistics using a square region of
+       the supplied width (instead of the region from the binary mask).
+    10. Undo the effects of cropping and downsampling by adding offsets/scaling the centroid
+        positions to correspond to pixels in the original image.
+
+    Args:
+        image (PIL.Image): Image to find centroids in.
+        sigma (float, optional): The number of noise standard deviations to threshold at.
+            Default 2.
+        image_th (float, optional): The value to threshold the image at. If supplied `sigma` and
+            `sigma_mode` will have no effect.
+        crop (tuple, optional): Cropping to apply, see :meth:`tetra3.crop_and_downsample_image`.
+        downsample (int, optional): Downsampling to apply, see
+            :meth:`tetra3.crop_and_downsample_image`.
+        filtsize (int, optional): Size of filter to use in local operations. Must be odd.
+            Default 25.
+        bg_sub_mode (str, optional): Background subtraction mode. Must be one of 'local_median',
+            'local_mean' (the default), 'global_median', 'global_mean'.
+        sigma_mode (str, optional): Mode used to calculate noise standard deviation. Must be one of
+            'local_median_abs', 'local_root_square', 'global_median_abs', or
+            'global_root_square' (the default).
+        binary_open (bool, optional): If True (the default), apply binary opening with 3x3 cross
+            to thresholded binary mask.
+        centroid_window (int, optional): If supplied, recalculate statistics using a square window
+            of the supplied size.
+        max_area (int, optional): Reject spots larger than this. Defaults to 100 pixels.
+        min_area (int, optional): Reject spots smaller than this. Defaults to 5 pixels.
+        max_sum (float, optional): Reject spots with a sum larger than this. Defaults to None.
+        min_sum (float, optional): Reject spots with a sum smaller than this. Defaults to None.
+        max_axis_ratio (float, optional): Reject spots with a ratio of major over minor axis larger
+            than this. Defaults to None.
+        max_returned (int, optional): Return at most this many spots. Defaults to None, which
+            returns all spots. Will return in order of brightness (spot sum).
+        return_moments (bool, optional): If set to True, return the calculated statistics (e.g.
+ higher order moments, sum, area) together with the spot positions. + return_images (bool, optional): If set to True, return a dictionary with partial results + from the steps in the algorithm. + + Returns: + numpy.ndarray or tuple: If `return_moments=False` and `return_images=False` (the defaults) + an array of shape (N,2) is returned with centroid positions (y down, x right) of the + found spots in order of brightness. If `return_moments=True` a tuple of numpy arrays + is returned with: (N,2) centroid positions, N sum, N area, (N,3) xx yy and xy second + moments, N major over minor axis ratio. If `return_images=True` a tuple is returned + with the results as defined previously and a dictionary with images and data of partial + results. The keys are: `converted_input`: The input after conversion to a mono float + numpy array. `cropped_and_downsampled`: The image after cropping and downsampling. + `removed_background`: The image after background subtraction. `binary_mask`: The + thresholded image where raw stars are detected (after binary opening). + `final_centroids`: The original image annotated with green circles for the extracted + centroids, and red circles for any centroids that were rejected. + """ + + # 1. Ensure image is float np array and 2D: + raw_image = image.copy() + image = np.asarray(image, dtype=np.float32) + if image.ndim == 3: + assert image.shape[2] in (1, 3), 'Colour image must have 1 or 3 colour channels' + if image.shape[2] == 3: + # Convert to greyscale + image = image[:, :, 0]*.299 + image[:, :, 1]*.587 + image[:, :, 2]*.114 + else: + # Delete empty dimension + image = image.squeeze(axis=2) + else: + assert image.ndim == 2, 'Image must be 2D or 3D array' + if return_images: + images_dict = {'converted_input': image.copy()} + # 2 Crop and downsample + (image, offs) = crop_and_downsample_image(image, crop=crop, downsample=downsample, + return_offsets=True, sum_when_downsample=True) + (height, width) = image.shape + (offs_h, offs_w) = offs + if return_images: + images_dict['cropped_and_downsampled'] = image.copy() + # 3. Subtract background: + if bg_sub_mode is not None: + if bg_sub_mode.lower() == 'local_median': + assert filtsize is not None, \ + 'Must define filter size for local median background subtraction' + assert filtsize % 2 == 1, 'Filter size must be odd' + image = image - scipy.ndimage.filters.median_filter(image, size=filtsize, + output=image.dtype) + elif bg_sub_mode.lower() == 'local_mean': + assert filtsize is not None, \ + 'Must define filter size for local median background subtraction' + assert filtsize % 2 == 1, 'Filter size must be odd' + image = image - scipy.ndimage.filters.uniform_filter(image, size=filtsize, + output=image.dtype) + elif bg_sub_mode.lower() == 'global_median': + image = image - np.median(image) + elif bg_sub_mode.lower() == 'global_mean': + image = image - np.mean(image) + else: + raise AssertionError('bg_sub_mode must be string: local_median, local_mean,' + + ' global_median, or global_mean') + if return_images: + images_dict['removed_background'] = image.copy() + # 4. Find noise standard deviation to threshold unless a threshold is already defined! 
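+    # The threshold is `sigma` times the noise standard deviation estimated per `sigma_mode`
+    # (a per-pixel map for the local modes, a single scalar for the global modes), e.g. with
+    # sigma=2 and 'global_root_square': image_th = 2 * sqrt(mean(image**2)).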
+ if image_th is None: + assert sigma_mode is not None and isinstance(sigma_mode, str), \ + 'Must define a sigma mode or image threshold' + assert sigma is not None and isinstance(sigma, (int, float)), \ + 'Must define sigma for thresholding (int or float)' + if sigma_mode.lower() == 'local_median_abs': + assert filtsize is not None, 'Must define filter size for local median sigma mode' + assert filtsize % 2 == 1, 'Filter size must be odd' + img_std = scipy.ndimage.filters.median_filter(np.abs(image), size=filtsize, + output=image.dtype) * 1.48 + elif sigma_mode.lower() == 'local_root_square': + assert filtsize is not None, 'Must define filter size for local median sigma mode' + assert filtsize % 2 == 1, 'Filter size must be odd' + img_std = np.sqrt(scipy.ndimage.filters.uniform_filter(image**2, size=filtsize, + output=image.dtype)) + elif sigma_mode.lower() == 'global_median_abs': + img_std = np.median(np.abs(image)) * 1.48 + elif sigma_mode.lower() == 'global_root_square': + img_std = np.sqrt(np.mean(image**2)) + else: + raise AssertionError('sigma_mode must be string: local_median_abs, local_root_square,' + + ' global_median_abs, or global_root_square') + image_th = img_std * sigma + #if return_images: + # images_dict['image_threshold'] = image_th + # 5. Threshold to find binary mask + bin_mask = image > image_th + if binary_open: + bin_mask = scipy.ndimage.binary_opening(bin_mask) + if return_images: + images_dict['binary_mask'] = bin_mask + # 6. Label each region in the binary mask + (labels, num_labels) = scipy.ndimage.label(bin_mask) + index = np.arange(1, num_labels + 1) + #if return_images: + # images_dict['labelled_regions'] = labels + if num_labels < 1: + # Found nothing in binary image, return empty. + if return_moments and return_images: + return ((np.empty((0, 2)), np.empty((0, 1)), np.empty((0, 1)), np.empty((0, 3)), + np.empty((0, 1))), images_dict) + elif return_moments: + return (np.empty((0, 2)), np.empty((0, 1)), np.empty((0, 1)), np.empty((0, 3)), + np.empty((0, 1))) + elif return_images: + return (np.empty((0, 2)), images_dict) + else: + return np.empty((0, 2)) + + # 7. Get statistics and threshold + def calc_stats(a, p): + """Calculates statistics for each labelled region: + - Sum (zeroth moment) + - Centroid y, x (first moment) + - Variance xx, yy, xy (second moment) + - Area (pixels) + - Major axis/minor axis ratio + First variable will be NAN if failed any of the checks + """ + (y, x) = (np.unravel_index(p, (height, width))) + area = len(a) + centroid = np.sum([a, x*a, y*a], axis=-1) + m0 = centroid[0] + centroid[1:] = centroid[1:] / m0 + m1_x = centroid[1] + m1_y = centroid[2] + # Check basic filtering + if min_area and area < min_area: + return (np.nan, m1_y+.5, m1_x+.5, np.nan, np.nan, np.nan, np.nan, np.nan) + if max_area and area > max_area: + return (np.nan, m1_y+.5, m1_x+.5, np.nan, np.nan, np.nan, np.nan, np.nan) + if min_sum and m0 < min_sum: + return (np.nan, m1_y+.5, m1_x+.5, np.nan, np.nan, np.nan, np.nan, np.nan) + if max_sum and m0 > max_sum: + return (np.nan, m1_y+.5, m1_x+.5, np.nan, np.nan, np.nan, np.nan, np.nan) + # If higher order data is requested or used for filtering, calculate. + if return_moments or max_axis_ratio is not None: + # Need to calculate second order data about the regions, firstly the moments + # then use that to get major/minor axes. 
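+                # The major/minor axis lengths below are 2*sqrt(eigenvalue) of the
+                # intensity-weighted covariance matrix [[m2_xx, m2_xy], [m2_xy, m2_yy]];
+                # their ratio is the elongation tested against max_axis_ratio.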
+ m2_xx = max(0, np.sum((x - m1_x)**2 * a) / m0) + m2_yy = max(0, np.sum((y - m1_y)**2 * a) / m0) + m2_xy = np.sum((x - m1_x) * (y - m1_y) * a) / m0 + major = np.sqrt(2 * (m2_xx + m2_yy + np.sqrt((m2_xx - m2_yy)**2 + 4 * m2_xy**2))) + minor = np.sqrt(2 * max(0, m2_xx + m2_yy - np.sqrt((m2_xx - m2_yy)**2 + 4 * m2_xy**2))) + if max_axis_ratio and minor <= 0: + return (np.nan, m1_y+.5, m1_x+.5, np.nan, np.nan, np.nan, np.nan, np.nan) + axis_ratio = major / max(minor, .000000001) + if max_axis_ratio and axis_ratio > max_axis_ratio: + return (np.nan, m1_y+.5, m1_x+.5, np.nan, np.nan, np.nan, np.nan, np.nan) + return (m0, m1_y+.5, m1_x+.5, m2_xx, m2_yy, m2_xy, area, axis_ratio) + else: + return (m0, m1_y+.5, m1_x+.5, np.nan, np.nan, np.nan, area, np.nan) + + tmp = scipy.ndimage.labeled_comprehension(image, labels, index, calc_stats, '8f', None, + pass_positions=True) + valid = ~np.isnan(tmp[:, 0]) + extracted = tmp[valid, :] + rejected = tmp[~valid, :] + if return_images: + # Convert 16-bit to 8-bit: + if raw_image.mode == 'I;16': + tmp = np.array(raw_image, dtype=np.uint16) + tmp //= 256 + tmp = tmp.astype(np.uint8) + raw_image = Image.fromarray(tmp) + # Convert mono to RGB + if raw_image.mode != 'RGB': + raw_image = raw_image.convert('RGB') + # Draw green circles for kept centroids, red for rejected + img_draw = ImageDraw.Draw(raw_image) + def draw_circle(centre, radius, **kwargs): + bbox = [centre[1] - radius, + centre[0] - radius, + centre[1] + radius, + centre[0] + radius] + img_draw.ellipse(bbox, **kwargs) + for entry in extracted: + pos = entry[1:3].copy() + size = .01*width + if downsample is not None: + pos *= downsample + pos += [offs_h, offs_w] + size *= downsample + draw_circle(pos, size, outline='green') + for entry in rejected: + pos = entry[1:3].copy() + size = .01*width + if downsample is not None: + pos *= downsample + pos += [offs_h, offs_w] + size *= downsample + draw_circle(pos, size, outline='red') + images_dict['final_centroids'] = raw_image + + # 8. Sort + order = (-extracted[:, 0]).argsort() + if max_returned: + order = order[:max_returned] + extracted = extracted[order, :] + # 9. If desired, redo centroiding with traditional window + if centroid_window is not None: + if centroid_window > min(height, width): + centroid_window = min(height, width) + for i in range(extracted.shape[0]): + c_x = int(np.floor(extracted[i, 2])) + c_y = int(np.floor(extracted[i, 1])) + offs_x = c_x - centroid_window // 2 + offs_y = c_y - centroid_window // 2 + if offs_y < 0: + offs_y = 0 + if offs_y > height - centroid_window: + offs_y = height - centroid_window + if offs_x < 0: + offs_x = 0 + if offs_x > width - centroid_window: + offs_x = width - centroid_window + img_cent = image[offs_y:offs_y + centroid_window, offs_x:offs_x + centroid_window] + img_sum = np.sum(img_cent) + (xx, yy) = np.meshgrid(np.arange(centroid_window) + .5, + np.arange(centroid_window) + .5) + xc = np.sum(img_cent * xx) / img_sum + yc = np.sum(img_cent * yy) / img_sum + extracted[i, 1:3] = np.array([yc, xc]) + [offs_y, offs_x] + # 10. 
Revert effects of crop and downsample + if downsample: + extracted[:, 1:3] = extracted[:, 1:3] * downsample # Scale centroid + if crop: + extracted[:, 1:3] = extracted[:, 1:3] + np.array([offs_h, offs_w]) # Offset centroid + # Return results, default just the centroids + if not any((return_moments, return_images)): + return extracted[:, 1:3] + # Otherwise, build list of requested returned items + result = [extracted[:, 1:3]] + if return_moments: + result.append([extracted[:, 0], extracted[:, 6], extracted[:, 3:6], + extracted[:, 7]]) + if return_images: + result.append(images_dict) + return tuple(result) + + +def crop_and_downsample_image(image, crop=None, downsample=None, sum_when_downsample=True, + return_offsets=False): + """Crop and/or downsample an image. Cropping is applied before downsampling. + + Args: + image (numpy.ndarray): The image to crop and downsample. Must be 2D. + crop (int or tuple, optional): Desired cropping of the image. May be defined in three ways: + + - Scalar: Image is cropped to given fraction (e.g. crop=2 gives 1/2 size image out). + - 2-tuple: Image is cropped to centered region with size crop = (height, width). + - 4-tuple: Image is cropped to region with size crop[0:2] = (height, width), offset + from the centre by crop[2:4] = (offset_down, offset_right). + + downsample (int, optional): Downsampling factor, e.g. downsample=2 will combine 2x2 pixel + regions into one. The image width and height must be divisible by this factor. + sum_when_downsample (bool, optional): If True (the default) downsampled pixels are + calculated by summing the original pixel values. If False the mean is used. + return_offsets (bool, optional): If set to True, the applied cropping offset from the top + left corner is returned. + Returns: + numpy.ndarray or tuple: If `return_offsets=False` (the default) a 2D array with the cropped + and dowsampled image is returned. If `return_offsets=True` is passed a tuple containing + the image and a tuple with the cropping offsets (top, left) is returned. + """ + # Input must be 2-d numpy array + # Crop can be either a scalar, 2-tuple, or 4-tuple: + # Scalar: Image is cropped to given fraction (eg input crop=2 gives 1/2 size image out) + # If 2-tuple: Image is cropped to center region with size crop = (height, width) + # If 4-tuple: Image is cropped to ROI with size crop[0:1] = (height, width) + # offset from centre by crop[2:3] = (offset_down, offset_right) + # Downsample is made by summing regions of downsample by downsample pixels. + # To get the mean set sum_when_downsample=False. + # Returned array is same type as input array! + + image = np.asarray(image) + assert image.ndim == 2, 'Input must be 2D' + # Do nothing if both are None + if crop is None and downsample is None: + if return_offsets is True: + return (image, (0, 0)) + else: + return image + full_height, full_width = image.shape + # Check if input is integer type (and therefore can overflow...) + if np.issubdtype(image.dtype, np.integer): + intype = image.dtype + else: + intype = None + # Crop: + if crop is not None: + try: + # Make crop into list of int + crop = [int(x) for x in crop] + if len(crop) == 2: + crop = crop + [0, 0] + elif len(crop) == 4: + pass + else: + raise ValueError('Length of crop must be 2 or 4 if iterable, not ' + + str(len(crop)) + '.') + except TypeError: + # Could not make list (i.e. not iterable input), crop to portion + crop = int(crop) + assert crop > 0, 'Crop must be greater than zero if scalar.' 
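+            # A scalar crop keeps the centred 1/crop-sized region, e.g. crop=2 keeps the
+            # central half of the height and width with zero extra offset.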
+ assert full_height % crop == 0 and full_width % crop == 0,\ + 'Crop must be divisor of image height and width if scalar.' + crop = [full_height // crop, full_width // crop, 0, 0] + # Calculate new height and width (making sure divisible with future downsampling) + divisor = downsample if downsample is not None else 2 + height = int(np.ceil(crop[0]/divisor)*divisor) + width = int(np.ceil(crop[1]/divisor)*divisor) + # Clamp at original size + if height > full_height: + height = full_height + if width > full_width: + width = full_width + # Calculate offsets from centre + offs_h = int(round(crop[2] + (full_height - height)/2)) + offs_w = int(round(crop[3] + (full_width - width)/2)) + # Clamp to be inside original image + if offs_h < 0: + offs_h = 0 + if offs_h > full_height-height: + offs_h = full_height-height + if offs_w < 0: + offs_w = 0 + if offs_w > full_width-width: + offs_w = full_width-width + # Do the cropping + image = image[offs_h:offs_h+height, offs_w:offs_w+width] + else: + offs_h = 0 + offs_w = 0 + height = full_height + width = full_width + # Downsample: + if downsample is not None: + assert height % downsample == 0 and width % downsample == 0,\ + '(Cropped) image must be divisible by downsampling factor' + if intype is not None: + # Convert integer types into float for summing without overflow risk + image = image.astype(np.float32) + if sum_when_downsample is True: + image = image.reshape((height//downsample, downsample, width//downsample, + downsample)).sum(axis=-1).sum(axis=1) + else: + image = image.reshape((height//downsample, downsample, width//downsample, + downsample)).mean(axis=-1).mean(axis=1) + if intype is not None: + # Convert back with clipping + image = image.clip(np.iinfo(intype).min, np.iinfo(intype).max).astype(intype) + # Return image and if desired the offset. + if return_offsets is True: + return (image, (offs_h, offs_w)) + else: + return image diff --git a/ogscope/web/api/alignment/routes.py b/ogscope/web/api/alignment/routes.py index 766fba8..be904f4 100644 --- a/ogscope/web/api/alignment/routes.py +++ b/ogscope/web/api/alignment/routes.py @@ -1,6 +1,7 @@ """ 极轴校准相关API路由 / Polar alignment API routes """ + from fastapi import APIRouter router = APIRouter() @@ -29,7 +30,5 @@ async def get_alignment_status(): "azimuth_error": 2.5, "altitude_error": 1.8, "precision": "good", - "progress": 75 + "progress": 75, } - - diff --git a/ogscope/web/api/analysis/lab_store.py b/ogscope/web/api/analysis/lab_store.py new file mode 100644 index 0000000..dad97e1 --- /dev/null +++ b/ogscope/web/api/analysis/lab_store.py @@ -0,0 +1,332 @@ +""" +星图解算实验室:清单、预设、实验记录文件存储 / Lab manifest, presets, experiment records. 
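+目录布局:清单在 upload_dir/analysis,预设在 data_dir/analysis/presets/{official,user},实验记录在 analysis_dir/experiments。
+Layout: manifest under upload_dir/analysis, presets under data_dir/analysis/presets/{official,user}, experiments under analysis_dir/experiments.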
+""" + +from __future__ import annotations + +import base64 +import csv +import hashlib +import io +import json +import uuid +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +from ogscope.config import Settings + + +def _utc_now() -> str: + return datetime.now(timezone.utc).isoformat() + + +class AnalysisLabStore: + """实验室侧持久化 / Lab persistence (JSON files).""" + + def __init__(self, settings: Settings) -> None: + self._settings = settings + self.upload_root = settings.upload_dir / "analysis" + self.presets_official = settings.data_dir / "analysis" / "presets" / "official" + self.presets_user = settings.data_dir / "analysis" / "presets" / "user" + self.experiments_root = settings.analysis_dir / "experiments" + for p in ( + self.upload_root, + self.presets_official, + self.presets_user, + self.experiments_root, + ): + p.mkdir(parents=True, exist_ok=True) + + @property + def manifest_path(self) -> Path: + return self.upload_root / "manifest.json" + + def load_manifest(self) -> dict[str, Any]: + """加载上传目录清单 / Load upload manifest.""" + if not self.manifest_path.is_file(): + return {"version": 1, "entries": {}} + try: + data = json.loads(self.manifest_path.read_text(encoding="utf-8")) + if not isinstance(data, dict) or "entries" not in data: + return {"version": 1, "entries": {}} + return data + except Exception: + return {"version": 1, "entries": {}} + + def save_manifest(self, data: dict[str, Any]) -> None: + self.manifest_path.write_text( + json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8" + ) + + def set_file_source(self, filename: str, source: str) -> None: + """设置素材来源标签 / Set asset source tag.""" + m = self.load_manifest() + entries: dict[str, Any] = m.setdefault("entries", {}) + ent = entries.setdefault(filename, {}) + ent["source"] = source + ent["updated_at"] = _utc_now() + self.save_manifest(m) + + def update_last_solve( + self, + filename: str, + metrics: dict[str, Any], + ) -> None: + """写入最近一次解算摘要 / Cache last solve summary for list UI.""" + m = self.load_manifest() + entries: dict[str, Any] = m.setdefault("entries", {}) + ent = entries.setdefault(filename, {}) + ent["last_solve"] = {**metrics, "at": _utc_now()} + self.save_manifest(m) + + def remove_manifest_entry(self, filename: str) -> None: + """从清单移除条目(删除文件后调用)/ Remove manifest row after file delete.""" + m = self.load_manifest() + entries: dict[str, Any] = m.setdefault("entries", {}) + if filename in entries: + del entries[filename] + self.save_manifest(m) + + def merge_list_entry(self, filename: str, base: dict[str, Any]) -> dict[str, Any]: + """合并清单元数据到列表项 / Merge manifest into upload list row.""" + m = self.load_manifest() + ent = m.get("entries", {}).get(filename, {}) + row = {**base} + if "source" in ent: + row["source"] = ent["source"] + else: + row["source"] = "unknown" + if "last_solve" in ent: + row["last_solve"] = ent["last_solve"] + return row + + def list_presets(self, scope: str) -> list[dict[str, Any]]: + """列出预设 JSON / List preset files.""" + root = self.presets_official if scope == "official" else self.presets_user + out: list[dict[str, Any]] = [] + if not root.is_dir(): + return out + for p in sorted(root.glob("*.json")): + try: + data = json.loads(p.read_text(encoding="utf-8")) + if isinstance(data, dict): + data.setdefault("id", p.stem) + data.setdefault("scope", scope) + out.append(data) + except Exception: + continue + return out + + def save_user_preset(self, name: str, params: dict[str, Any]) -> dict[str, Any]: + """保存用户预设 / Save user 
preset.""" + pid = str(uuid.uuid4()) + payload = { + "id": pid, + "name": name, + "scope": "user", + "params": params, + "created_at": _utc_now(), + } + target = self.presets_user / f"{pid}.json" + target.write_text( + json.dumps(payload, ensure_ascii=False, indent=2), encoding="utf-8" + ) + return payload + + def delete_user_preset(self, preset_id: str) -> None: + """删除用户预设 / Delete user preset.""" + clean = Path(preset_id).name + target = self.presets_user / f"{clean}.json" + if target.is_file(): + target.unlink() + + def create_experiment( + self, + input_name: str, + preset_label: str, + result_json: dict[str, Any], + metrics: dict[str, Any], + thumbnail_png_base64: str | None, + replay: dict[str, Any] | None = None, + save_asset_snapshot: bool = True, + ) -> dict[str, Any]: + """写入实验记录 / Persist experiment record.""" + eid = str(uuid.uuid4()) + thumb_path: str | None = None + if thumbnail_png_base64: + raw = base64.b64decode( + thumbnail_png_base64.split(",")[-1] + if "," in thumbnail_png_base64 + else thumbnail_png_base64 + ) + thumb_path = str(self.experiments_root / f"{eid}.png") + Path(thumb_path).write_bytes(raw) + asset_snapshot_relpath: str | None = None + asset_digest: str | None = None + src = (self.upload_root / Path(input_name).name).resolve() + root = self.upload_root.resolve() + if save_asset_snapshot and src.is_file() and str(src).startswith(str(root)): + try: + data = src.read_bytes() + asset_digest = hashlib.sha256(data).hexdigest() + ext = src.suffix if src.suffix else ".bin" + asset_snapshot_relpath = f"{eid}_asset{ext}" + (self.experiments_root / asset_snapshot_relpath).write_bytes(data) + except OSError: + asset_snapshot_relpath = None + asset_digest = None + rec = { + "id": eid, + "input_name": input_name, + "preset_label": preset_label, + "created_at": _utc_now(), + "metrics": metrics, + "result_json": result_json, + "thumbnail_relpath": Path(thumb_path).name if thumb_path else None, + "replay": replay, + "asset_snapshot_relpath": asset_snapshot_relpath, + "asset_digest": asset_digest, + } + (self.experiments_root / f"{eid}.json").write_text( + json.dumps(rec, ensure_ascii=False, indent=2), encoding="utf-8" + ) + return rec + + def delete_experiment(self, experiment_id: str) -> None: + """删除一条实验记录 JSON、缩略图与素材快照 / Delete experiment artifacts.""" + clean = Path(experiment_id).name + if not clean or clean != experiment_id.strip(): + raise ValueError("实验 ID 无效 / Invalid experiment id") + jpath = self.experiments_root / f"{clean}.json" + if not jpath.is_file(): + raise FileNotFoundError("实验记录不存在 / Experiment not found") + try: + data = json.loads(jpath.read_text(encoding="utf-8")) + except Exception: + data = {} + snap = data.get("asset_snapshot_relpath") + jpath.unlink() + thumb = self.experiments_root / f"{clean}.png" + if thumb.is_file(): + thumb.unlink() + if isinstance(snap, str) and snap: + sp = (self.experiments_root / Path(snap).name).resolve() + er = self.experiments_root.resolve() + if str(sp).startswith(str(er)) and sp.is_file(): + sp.unlink() + + def count_experiments_for_input(self, input_name: str) -> int: + """统计引用某素材文件名的实验条数 / Count experiments for an upload basename.""" + base = Path(input_name).name + n = 0 + for r in self._all_experiment_records(): + if (r.get("input_name") or "") == base: + n += 1 + return n + + def delete_experiments_for_input(self, input_name: str) -> int: + """删除所有引用该素材的实验记录 / Cascade-delete experiments by input filename.""" + base = Path(input_name).name + ids = [ + str(r.get("id")) + for r in self._all_experiment_records() + 
if (r.get("input_name") or "") == base and r.get("id") + ] + for eid in ids: + try: + self.delete_experiment(eid) + except (FileNotFoundError, ValueError): + continue + return len(ids) + + def experiment_asset_path(self, experiment_id: str) -> Path: + """实验素材快照文件路径 / Path to snapshot copy for replay.""" + clean = Path(experiment_id).name + jpath = self.experiments_root / f"{clean}.json" + if not jpath.is_file(): + raise FileNotFoundError("实验记录不存在 / Experiment not found") + data = json.loads(jpath.read_text(encoding="utf-8")) + rel = data.get("asset_snapshot_relpath") + if not rel: + raise FileNotFoundError("无素材快照 / No asset snapshot for this record") + p = (self.experiments_root / Path(str(rel)).name).resolve() + er = self.experiments_root.resolve() + if not str(p).startswith(str(er)) or not p.is_file(): + raise FileNotFoundError("快照文件不存在 / Snapshot missing") + return p + + def _all_experiment_records(self) -> list[dict[str, Any]]: + rows: list[dict[str, Any]] = [] + for p in sorted( + self.experiments_root.glob("*.json"), + key=lambda x: x.stat().st_mtime, + reverse=True, + ): + try: + data = json.loads(p.read_text(encoding="utf-8")) + if isinstance(data, dict): + rows.append(data) + except Exception: + continue + return rows + + def list_experiments( + self, + q: str | None, + page: int, + page_size: int, + ) -> dict[str, Any]: + """分页列出实验 / Paginated experiment list.""" + rows = self._all_experiment_records() + if q: + ql = q.lower() + rows = [ + r + for r in rows + if ql in (r.get("input_name") or "").lower() + or ql in (r.get("preset_label") or "").lower() + ] + total = len(rows) + start = max(0, (page - 1) * page_size) + end = start + page_size + return { + "total": total, + "page": page, + "page_size": page_size, + "items": rows[start:end], + } + + def export_experiments_json(self) -> str: + """导出全部实验为 JSON 字符串 / Export all as JSON.""" + rows = self._all_experiment_records() + return json.dumps(rows, ensure_ascii=False, indent=2) + + def export_experiments_csv(self) -> str: + """导出 CSV / Export CSV.""" + items = self._all_experiment_records() + buf = io.StringIO() + w = csv.writer(buf) + w.writerow( + [ + "id", + "created_at", + "input_name", + "preset_label", + "matches", + "rmse_arcsec", + ] + ) + for r in items: + m = r.get("metrics") or {} + w.writerow( + [ + r.get("id"), + r.get("created_at"), + r.get("input_name"), + r.get("preset_label"), + m.get("matches", ""), + m.get("rmse_arcsec", ""), + ] + ) + return buf.getvalue() diff --git a/ogscope/web/api/analysis/routes.py b/ogscope/web/api/analysis/routes.py index 55c7176..0166848 100644 --- a/ogscope/web/api/analysis/routes.py +++ b/ogscope/web/api/analysis/routes.py @@ -2,28 +2,131 @@ 素材分析路由 / Asset analysis routes """ -from fastapi import APIRouter, File, HTTPException, UploadFile -from fastapi import Query +import mimetypes + +from fastapi import APIRouter, File, Form, HTTPException, Query, UploadFile +from fastapi.responses import FileResponse, PlainTextResponse from ogscope.web.api.analysis.services import analysis_service -from ogscope.web.api.models.schemas import AnalysisJobCreateRequest +from ogscope.web.api.models.schemas import ( + AnalysisBatchSolveRequest, + AnalysisExperimentCreate, + AnalysisExtractPreviewRequest, + AnalysisJobCreateRequest, + AnalysisPresetCreate, + AnalysisSolveImageRequest, + AnalysisSolveVideoFrameRequest, + ImportFromDebugRequest, +) router = APIRouter() +@router.get("/analysis/uploads") +async def list_analysis_uploads(): + """列出已上传素材(持久化目录)/ List persisted uploads""" + try: + return 
analysis_service.list_uploads() + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.get("/analysis/uploads/{filename}/experiment_count") +async def upload_experiment_count(filename: str): + """引用该素材的实验记录条数 / Count experiments for upload.""" + try: + return analysis_service.upload_experiment_count(filename) + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.delete("/analysis/uploads/{filename}") +async def delete_analysis_upload( + filename: str, + delete_experiments: bool = Query( + False, + description="同时删除引用该素材的实验记录 / Also delete linked experiments", + ), +): + """从素材池删除文件及侧车 / Delete file from pool and sidecar.""" + try: + return analysis_service.delete_upload( + filename, delete_experiments=delete_experiments + ) + except FileNotFoundError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + except ValueError as exc: + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.get("/analysis/uploads/{filename}/info") +async def get_analysis_upload_file_info(filename: str): + """上传素材侧车合并信息(与调试 info 形状对齐)/ Upload file + sidecar merged info.""" + try: + return analysis_service.get_upload_file_info(filename) + except FileNotFoundError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + except ValueError as exc: + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.get("/analysis/uploads/file") +async def get_analysis_upload_file( + filename: str = Query(..., description="文件名 / Basename") +): + """下载已上传文件(预览或复用)/ Serve persisted upload for preview or reuse""" + try: + path = analysis_service.resolve_upload_path(filename) + if not path.is_file(): + raise HTTPException(status_code=404, detail="文件不存在 / File not found") + except ValueError as exc: + raise HTTPException(status_code=400, detail=str(exc)) from exc + suffix = path.suffix.lower() + media_map = { + ".mp4": "video/mp4", + ".m4v": "video/mp4", + ".webm": "video/webm", + ".mov": "video/quicktime", + ".avi": "video/x-msvideo", + } + media = media_map.get(suffix) + if not media: + media, _ = mimetypes.guess_type(path.name) + return FileResponse( + path, + media_type=media or "application/octet-stream", + filename=path.name, + ) + + @router.post("/analysis/upload") -async def upload_analysis_asset(file: UploadFile = File(...)): - """上传素材 / Upload asset""" +async def upload_analysis_asset( + file: UploadFile = File(...), + source: str = Form(default="analysis_upload"), +): + """上传素材 / Upload asset(可选来源标签 / optional source tag)""" try: payload = await file.read() return await analysis_service.save_upload( filename=file.filename or "uploaded.bin", payload=payload, + source=source, ) except Exception as exc: # noqa: BLE001 raise HTTPException(status_code=400, detail=str(exc)) from exc +@router.post("/analysis/uploads/import_from_debug") +async def import_upload_from_debug(body: ImportFromDebugRequest): + """从调试采集目录复制到素材池 / Copy dev_captures file into analysis pool.""" + try: + return analysis_service.import_from_debug_capture(body.filename) + except FileNotFoundError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + @router.post("/analysis/jobs") async def create_analysis_job(payload: AnalysisJobCreateRequest): """创建任务 / Create analysis job""" @@ -35,24 +138,156 @@ async def 
create_analysis_job(payload: AnalysisJobCreateRequest): hint_dec_deg=payload.hint_dec_deg, frame_step=payload.frame_step, max_frames=payload.max_frames, + fov_estimate=payload.fov_estimate, + fov_max_error=payload.fov_max_error, + solve_timeout_ms=payload.solve_timeout_ms, + centroid=payload.centroid, + max_image_side=payload.max_image_side, + large_scale_bg_subtract=bool(payload.large_scale_bg_subtract), ) except Exception as exc: # noqa: BLE001 raise HTTPException(status_code=400, detail=str(exc)) from exc @router.post("/analysis/solve/image") -async def solve_single_image( - input_name: str = Query(...), - hint_ra_deg: float | None = Query(default=None), - hint_dec_deg: float | None = Query(default=None), +async def solve_single_image(body: AnalysisSolveImageRequest): + """直接解算单图(JSON body)/ Solve single image via JSON body.""" + try: + return await analysis_service.solve_single_image(body) + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.post("/analysis/solve/batch") +async def solve_batch(body: AnalysisBatchSolveRequest): + """同一素材多组参数批量解算 / Batch solve with multiple param sets.""" + try: + return await analysis_service.batch_solve(body) + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.get("/analysis/presets") +async def list_analysis_presets( + scope: str = Query("user", description="official | user") ): - """直接解算单图 / Solve single image directly""" + """列出预设 / List presets.""" + try: + return analysis_service.list_presets(scope) + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.post("/analysis/presets") +async def create_analysis_preset(body: AnalysisPresetCreate): + """创建用户预设 / Create user preset.""" try: - return await analysis_service.solve_single_image( - input_name=input_name, - hint_ra_deg=hint_ra_deg, - hint_dec_deg=hint_dec_deg, + return analysis_service.create_user_preset(body) + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.delete("/analysis/presets/{preset_id}") +async def delete_analysis_preset(preset_id: str): + """删除用户预设 / Delete user preset.""" + try: + analysis_service.delete_user_preset(preset_id) + return {"success": True} + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.post("/analysis/experiments") +async def create_analysis_experiment(body: AnalysisExperimentCreate): + """保存实验记录 / Save experiment record.""" + try: + return analysis_service.create_experiment(body) + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.delete("/analysis/experiments/{experiment_id}") +async def delete_analysis_experiment(experiment_id: str): + """删除一条实验记录 / Delete one experiment record.""" + try: + analysis_service.delete_experiment(experiment_id) + return {"success": True} + except FileNotFoundError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + except ValueError as exc: + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.get("/analysis/experiments") +async def list_analysis_experiments( + q: str | None = Query(None, description="搜索文件名或预设名 / Search"), + page: int = Query(1, ge=1), + page_size: int = Query(20, ge=1, le=200), +): + """实验记录列表 / Experiment list.""" + try: + return analysis_service.list_experiments(q, 
page, page_size) + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.get("/analysis/experiments/export") +async def export_analysis_experiments( + export_format: str = Query("json", alias="format", description="json | csv"), +): + """导出实验记录 / Export experiments.""" + try: + text = analysis_service.export_experiments(export_format) + media = ( + "application/json" if export_format == "json" else "text/csv; charset=utf-8" ) + return PlainTextResponse(content=text, media_type=media) + except ValueError as exc: + raise HTTPException(status_code=400, detail=str(exc)) from exc + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.get("/analysis/settings") +async def analysis_lab_settings(): + """分析台公开默认配置 / Public defaults for analysis lab.""" + try: + return analysis_service.lab_public_settings() + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.post("/analysis/solve/frame") +async def solve_analysis_frame(body: AnalysisSolveVideoFrameRequest): + """相机或视频单帧解算 / Solve one frame from camera or pool video.""" + try: + return await analysis_service.solve_video_frame(body) + except RuntimeError as exc: + raise HTTPException(status_code=503, detail=str(exc)) from exc + except FileNotFoundError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + except ValueError as exc: + raise HTTPException(status_code=400, detail=str(exc)) from exc + except Exception as exc: # noqa: BLE001 + raise HTTPException(status_code=400, detail=str(exc)) from exc + + +@router.get("/analysis/experiments/{experiment_id}/asset") +async def get_experiment_asset_file(experiment_id: str): + """实验素材快照(用于回放)/ Experiment asset snapshot for replay.""" + try: + path = analysis_service.get_experiment_asset_path(experiment_id) + media, _ = mimetypes.guess_type(path.name) + return FileResponse(path, media_type=media or "application/octet-stream") + except FileNotFoundError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + + +@router.post("/analysis/extract/preview") +async def extract_centroid_preview(body: AnalysisExtractPreviewRequest): + """提星二值掩膜预览(不调解算)/ Preview centroid binary mask without plate solve.""" + try: + return await analysis_service.extract_preview(body) except Exception as exc: # noqa: BLE001 raise HTTPException(status_code=400, detail=str(exc)) from exc diff --git a/ogscope/web/api/analysis/services.py b/ogscope/web/api/analysis/services.py index 95d03ec..76514f8 100644 --- a/ogscope/web/api/analysis/services.py +++ b/ogscope/web/api/analysis/services.py @@ -4,8 +4,12 @@ from __future__ import annotations +import asyncio import json +import shutil +import time import uuid +from concurrent.futures import ThreadPoolExecutor from dataclasses import dataclass, field from datetime import datetime, timezone from pathlib import Path @@ -13,9 +17,94 @@ import cv2 -from ogscope.algorithms.plate_solve import PlateSolver +from ogscope.algorithms.plate_solve import ( + CentroidExtractionParams, + PlateSolver, + centroid_extraction_preview, + merge_centroid_params, +) from ogscope.algorithms.star_extract import StarExtractor from ogscope.config import get_settings +from ogscope.web.api.analysis.lab_store import AnalysisLabStore +from ogscope.web.api.models.schemas import ( + AnalysisBatchSolveRequest, + AnalysisExperimentCreate, + AnalysisExtractPreviewRequest, + AnalysisPresetCreate, 
+ AnalysisSolveImageRequest, + AnalysisSolveVideoFrameRequest, + CentroidParamsPayload, +) + +_SOLVE_PROFILE_DEFAULT = "balanced" +_SOLVE_PROFILE_OVERRIDES: dict[str, dict[str, Any]] = { + "speed": { + "timeout_ms": 1000, + "max_stars": 40, + "centroid": { + "sigma": 3.4, + "min_area": 8, + "max_area": 280, + "binary_open": True, + "max_axis_ratio": 2.2, + }, + }, + "balanced": { + "timeout_ms": 1500, + "max_stars": 60, + "centroid": { + "sigma": 3.0, + "min_area": 6, + "max_area": 360, + "binary_open": True, + "max_axis_ratio": 2.8, + }, + }, + "robust": { + "timeout_ms": 3000, + "max_stars": 90, + "centroid": { + "sigma": 2.5, + "min_area": 4, + "max_area": 500, + "binary_open": True, + "max_axis_ratio": None, + }, + }, +} + + +def _merge_debug_style_sidecar_into_info( + info: dict[str, Any], capture_info: dict[str, Any] +) -> None: + """将侧车 JSON 的 camera/extra 展开到顶层,与调试页 info 一致 / Match debug file info shape.""" + cam = capture_info.get("camera") + if isinstance(cam, dict): + for k in ( + "exposure_us", + "analogue_gain", + "digital_gain", + "fps", + "auto_exposure", + "rotation", + "sampling_mode", + "color_mode", + "sensor", + "resolution", + ): + if k not in capture_info and k in cam: + capture_info[k] = cam[k] + if capture_info.get("resolution") is None: + ow = cam.get("output_width") or cam.get("width") + oh = cam.get("output_height") or cam.get("height") + if ow and oh: + capture_info["resolution"] = f"{ow}x{oh}" + extra = capture_info.get("extra") + if isinstance(extra, dict): + for k, v in extra.items(): + if k not in capture_info: + capture_info[k] = v + info.update(capture_info) @dataclass(slots=True) @@ -57,19 +146,152 @@ def __init__(self) -> None: self.upload_root.mkdir(parents=True, exist_ok=True) self.jobs_root.mkdir(parents=True, exist_ok=True) self.results_root.mkdir(parents=True, exist_ok=True) + # 解算专用线程池(避免与相机预览等争用默认线程池)/ Dedicated executor for solving tasks + self._solver_executor = ThreadPoolExecutor( + max_workers=2, thread_name_prefix="solver" + ) + self._solver_max_stars = settings.solver_max_stars self.extractor = StarExtractor(max_stars=settings.solver_max_stars) - self.solver = PlateSolver(fov_deg=settings.solver_fov_deg) + self.solver = PlateSolver( + fov_deg=settings.solver_fov_deg, + fov_max_error_deg=settings.solver_fov_max_error_deg, + solve_timeout_ms=settings.solver_timeout_ms, + ) self.default_hint_ra = settings.solver_hint_ra_deg self.default_hint_dec = settings.solver_hint_dec_deg self._jobs: dict[str, AnalysisJob] = {} + self._lab = AnalysisLabStore(settings) + + def _centroid_params_from_payload( + self, payload: CentroidParamsPayload | None + ) -> CentroidExtractionParams | None: + """合并请求中的提星覆盖项与默认配置 / Merge API overrides with Settings defaults.""" + if payload is None: + return None + base = CentroidExtractionParams.from_settings(get_settings()) + return merge_centroid_params(base, payload.model_dump(exclude_none=True)) + + def _resolve_solve_profile( + self, + profile_name: str | None, + payload: CentroidParamsPayload | None, + solve_timeout_ms: int | None, + ) -> tuple[CentroidExtractionParams, int, int, str]: + """解析解算分档并返回参数 / Resolve solve profile into concrete params.""" + settings = get_settings() + effective = str(profile_name or _SOLVE_PROFILE_DEFAULT).lower() + if effective not in _SOLVE_PROFILE_OVERRIDES: + effective = _SOLVE_PROFILE_DEFAULT + + profile_cfg = _SOLVE_PROFILE_OVERRIDES[effective] + base = CentroidExtractionParams.from_settings(settings) + centroid = merge_centroid_params(base, profile_cfg.get("centroid", {})) + if 
payload is not None: + centroid = merge_centroid_params( + centroid, payload.model_dump(exclude_none=True) + ) + + max_stars = int(profile_cfg.get("max_stars", self._solver_max_stars)) + timeout_ms = int( + solve_timeout_ms + if solve_timeout_ms is not None + else profile_cfg.get("timeout_ms", settings.solver_timeout_ms) + ) + return centroid, max(4, max_stars), max(200, timeout_ms), effective + + def resolve_upload_path(self, filename: str) -> Path: + """解析上传目录内安全路径(仅单层文件名)/ Safe path under upload_root (basename only).""" + clean = filename.strip() + name = Path(clean).name + if not name or name != clean: + raise ValueError("文件名无效 / Invalid filename") + path = (self.upload_root / name).resolve() + root = self.upload_root.resolve() + try: + path.relative_to(root) + except ValueError as exc: + raise ValueError("路径非法 / Invalid path") from exc + return path + + def get_upload_file_info(self, filename: str) -> dict[str, Any]: + """从上传目录读取文件与 stem.txt 侧车 / File + optional sidecar from upload pool.""" + path = self.resolve_upload_path(filename) + if not path.is_file(): + raise FileNotFoundError("上传文件不存在 / Uploaded file not found") + st = path.stat() + image_ext = {".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".webp"} + video_ext = { + ".mp4", + ".avi", + ".mov", + ".mkv", + ".wmv", + ".flv", + ".webm", + ".m4v", + } + suffix = path.suffix.lower() + file_type = ( + "image" + if suffix in image_ext + else "video" if suffix in video_ext else "file" + ) + info: dict[str, Any] = { + "filename": path.name, + "size": st.st_size, + "modified": datetime.fromtimestamp( + st.st_mtime, tz=timezone.utc + ).isoformat(), + "type": file_type, + } + sidecar = self.upload_root / f"{path.stem}.txt" + if sidecar.is_file(): + try: + raw = json.loads(sidecar.read_text(encoding="utf-8")) + if isinstance(raw, dict): + _merge_debug_style_sidecar_into_info(info, raw) + except (json.JSONDecodeError, OSError): + pass + return info - async def save_upload(self, filename: str, payload: bytes) -> dict[str, Any]: + def list_uploads(self) -> dict[str, Any]: + """列出已持久化上传的文件 / List persisted uploads (flat, no recursion).""" + root = self.upload_root + files: list[dict[str, Any]] = [] + if not root.is_dir(): + return {"upload_dir": str(root.resolve()), "files": []} + for p in root.iterdir(): + if not p.is_file(): + continue + if p.name.startswith("."): + continue + if p.name == "manifest.json": + continue + # 侧车 .txt 不单独列入素材池 / Hide sidecar metadata from pool list + if p.suffix.lower() == ".txt": + continue + st = p.stat() + base = { + "filename": p.name, + "size": st.st_size, + "modified_at": datetime.fromtimestamp( + st.st_mtime, tz=timezone.utc + ).isoformat(), + } + files.append(self._lab.merge_list_entry(p.name, base)) + files.sort(key=lambda x: x["modified_at"], reverse=True) + return {"upload_dir": str(root.resolve()), "files": files} + + async def save_upload( + self, filename: str, payload: bytes, source: str = "analysis_upload" + ) -> dict[str, Any]: """保存上传文件 / Save uploaded file""" safe_name = Path(filename).name if not safe_name: raise ValueError("文件名无效 / Invalid filename") target = self.upload_root / safe_name target.write_bytes(payload) + self._lab.set_file_source(safe_name, source) return { "success": True, "filename": safe_name, @@ -85,37 +307,63 @@ async def create_job( hint_dec_deg: float | None = None, frame_step: int = 1, max_frames: int = 180, + fov_estimate: float | None = None, + fov_max_error: float | None = None, + solve_timeout_ms: int | None = None, + centroid: CentroidParamsPayload | None = None, 
+ max_image_side: int | None = None, + large_scale_bg_subtract: bool = False, ) -> dict[str, Any]: """创建并执行任务 / Create and execute job""" if input_type not in {"image", "video"}: - raise ValueError("input_type 仅支持 image 或 video / input_type must be image or video") + raise ValueError( + "input_type 仅支持 image 或 video / input_type must be image or video" + ) source = self.upload_root / Path(input_name).name if not source.exists(): raise FileNotFoundError("上传文件不存在 / Uploaded file not found") - job = AnalysisJob(job_id=str(uuid.uuid4()), input_name=source.name, input_type=input_type) + job = AnalysisJob( + job_id=str(uuid.uuid4()), input_name=source.name, input_type=input_type + ) self._jobs[job.job_id] = job self._persist_job(job) + centroid_params = self._centroid_params_from_payload(centroid) try: job.status = "running" job.message = "开始分析 / Analysis started" self._persist_job(job) + loop = asyncio.get_running_loop() if input_type == "image": - results = self._analyze_image( - source=source, - hint_ra_deg=hint_ra_deg, - hint_dec_deg=hint_dec_deg, + results = await loop.run_in_executor( + self._solver_executor, + self._analyze_image, + source, + hint_ra_deg, + hint_dec_deg, + fov_estimate, + fov_max_error, + solve_timeout_ms, + centroid_params, + max_image_side, + None, + large_scale_bg_subtract, ) else: - results = self._analyze_video( - source=source, - hint_ra_deg=hint_ra_deg, - hint_dec_deg=hint_dec_deg, - frame_step=frame_step, - max_frames=max_frames, - job=job, + results = await loop.run_in_executor( + self._solver_executor, + self._analyze_video, + source, + hint_ra_deg, + hint_dec_deg, + frame_step, + max_frames, + job, + fov_estimate, + fov_max_error, + solve_timeout_ms, ) result_path = self.results_root / f"{job.job_id}.json" result_payload = { @@ -141,23 +389,273 @@ async def create_job( return job.to_dict() async def solve_single_image( - self, input_name: str, hint_ra_deg: float | None = None, hint_dec_deg: float | None = None + self, body: AnalysisSolveImageRequest ) -> dict[str, Any]: - """直接解算单图 / Solve a single image directly""" - source = self.upload_root / Path(input_name).name + """直接解算单图(JSON body)/ Solve a single image via JSON body.""" + source = self.upload_root / Path(body.input_name).name if not source.exists(): raise FileNotFoundError("上传文件不存在 / Uploaded file not found") - rows = self._analyze_image( - source=source, - hint_ra_deg=hint_ra_deg, - hint_dec_deg=hint_dec_deg, + loop = asyncio.get_running_loop() + # 档位与两段策略解析 / Resolve profile and two-stage strategy + centroid_params, max_stars, timeout_ms, requested_profile = ( + self._resolve_solve_profile( + body.solve_profile, body.centroid, body.solve_timeout_ms + ) ) + + ls_bg = bool(body.large_scale_bg_subtract) + + def _run_single() -> list[dict[str, Any]]: + return self._analyze_image( + source=source, + hint_ra_deg=body.hint_ra_deg, + hint_dec_deg=body.hint_dec_deg, + fov_estimate=body.fov_estimate, + fov_max_error=body.fov_max_error, + solve_timeout_ms=timeout_ms, + centroid_params=centroid_params, + max_image_side=body.max_image_side, + max_stars=max_stars, + large_scale_bg_subtract=ls_bg, + ) + + def _run_two_stage() -> list[dict[str, Any]]: + """平衡档位使用 speed→robust 两段策略 / Balanced profile: speed then robust fallback.""" + # 第 1 段:speed 档快速尝试 / Stage 1: quick speed attempt + speed_centroid, speed_max_stars, speed_timeout_ms, _ = ( + self._resolve_solve_profile( + "speed", body.centroid, body.solve_timeout_ms + ) + ) + first = self._analyze_image( + source=source, + hint_ra_deg=body.hint_ra_deg, + 
hint_dec_deg=body.hint_dec_deg, + fov_estimate=body.fov_estimate, + fov_max_error=body.fov_max_error, + solve_timeout_ms=speed_timeout_ms, + centroid_params=speed_centroid, + max_image_side=body.max_image_side, + max_stars=speed_max_stars, + large_scale_bg_subtract=ls_bg, + ) + row0 = first[0] if first else None + if row0 and row0.get("status") == "MATCH_FOUND": + row0["solve_profile"] = "speed" + return [row0] + + # 噪点图动态 max_stars 收紧 / Heuristic: tighten max_stars for noisy frames + detected = int(row0.get("detected_stars") or 0) if row0 else 0 + robust_centroid, robust_max_stars, robust_timeout_ms, _ = ( + self._resolve_solve_profile( + "robust", body.centroid, body.solve_timeout_ms + ) + ) + if detected > 0 and detected > robust_max_stars: + robust_max_stars = max(20, int(robust_max_stars * 0.7)) + + second = self._analyze_image( + source=source, + hint_ra_deg=body.hint_ra_deg, + hint_dec_deg=body.hint_dec_deg, + fov_estimate=body.fov_estimate, + fov_max_error=body.fov_max_error, + solve_timeout_ms=robust_timeout_ms, + centroid_params=robust_centroid, + max_image_side=body.max_image_side, + max_stars=robust_max_stars, + large_scale_bg_subtract=ls_bg, + ) + if second: + second[0]["solve_profile"] = "robust" + return second + + # balanced 档默认启用两段策略,其他档位单次解算 / Balanced uses two-stage, others single-pass + if requested_profile == "balanced": + rows = await loop.run_in_executor(self._solver_executor, _run_two_stage) + effective_profile = ( + rows[0].get("solve_profile") + if rows and rows[0].get("solve_profile") + else "balanced" + ) + else: + rows = await loop.run_in_executor(self._solver_executor, _run_single) + effective_profile = requested_profile + + row = rows[0] if rows else None + if row and "solve_profile" not in row: + row["solve_profile"] = effective_profile + # 默认精简 raw,大字段仅在 detail_level==full 时返回 / Drop heavy raw unless client asks for full detail. 
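# --- Editor's aside (illustrative sketch, not part of the patch): the balanced
# profile implemented above is a plain two-pass fallback -- keep the fast attempt
# if it matched, otherwise pay for the robust attempt. The same control flow in
# isolation, with a hypothetical `solve_once` callable standing in for
# `_analyze_image`:

from typing import Any, Callable


def solve_with_fallback(
    solve_once: Callable[..., dict[str, Any]],
    fast_params: dict[str, Any],
    robust_params: dict[str, Any],
) -> dict[str, Any]:
    """Try the fast parameter set first; fall back to the robust set on a miss."""
    first = solve_once(**fast_params)
    if first.get("status") == "MATCH_FOUND":
        return {**first, "solve_profile": "speed"}
    second = solve_once(**robust_params)
    return {**second, "solve_profile": "robust"}

# --- end of aside; the patch hunk continues below.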
+ detail_level = getattr(body, "detail_level", None) or "summary" + if row and detail_level != "full": + row.pop("tetra", None) + if row: + self._lab.update_last_solve( + source.name, + self._metrics_from_solve_row(row), + ) return { "success": True, "input_name": source.name, - "result": rows[0] if rows else None, + "result": row, + } + + @staticmethod + def _metrics_from_solve_row(row: dict[str, Any]) -> dict[str, Any]: + """提取列表与实验用指标 / Metrics for manifest and experiments.""" + return { + "matches": row.get("matches"), + "rmse_arcsec": row.get("rmse_arcsec"), + "status": row.get("status"), + "prob": row.get("prob"), + "t_solve_ms": row.get("t_solve_ms"), + } + + async def batch_solve(self, body: AnalysisBatchSolveRequest) -> dict[str, Any]: + """多组参数顺序解算同一文件 / Batch solve same file with multiple param sets.""" + results: list[dict[str, Any]] = [] + for run in body.runs: + params = run.params.model_dump(exclude_none=True) + req = AnalysisSolveImageRequest.model_validate( + {"input_name": body.input_name, **params} + ) + try: + out = await self.solve_single_image(req) + results.append( + { + "label": run.label, + "success": True, + "result": out.get("result"), + "input_name": out.get("input_name"), + } + ) + except Exception as exc: # noqa: BLE001 + results.append( + { + "label": run.label, + "success": False, + "error": str(exc), + } + ) + return {"input_name": body.input_name, "results": results} + + def import_from_debug_capture(self, filename: str) -> dict[str, Any]: + """从 ~/dev_captures 复制到分析素材池并标记来源 / Copy debug capture into pool.""" + src = Path.home() / "dev_captures" / Path(filename).name + if not src.is_file(): + raise FileNotFoundError( + "调试采集文件不存在 / Debug capture file not found in dev_captures" + ) + dst = self.upload_root / src.name + shutil.copy2(src, dst) + side_txt = Path.home() / "dev_captures" / f"{src.stem}.txt" + if side_txt.is_file(): + shutil.copy2(side_txt, self.upload_root / side_txt.name) + self._lab.set_file_source(dst.name, "debug_console") + return { + "success": True, + "filename": dst.name, + "size": dst.stat().st_size, } + def list_presets(self, scope: str) -> dict[str, Any]: + """列出官方或用户预设 / List official or user presets.""" + if scope not in {"official", "user"}: + raise ValueError( + "scope 须为 official 或 user / scope must be official or user" + ) + return {"scope": scope, "presets": self._lab.list_presets(scope)} + + def create_user_preset(self, body: AnalysisPresetCreate) -> dict[str, Any]: + """创建用户预设 / Create user preset.""" + params = body.params.model_dump(exclude_none=True) + return self._lab.save_user_preset(body.name, params) + + def delete_user_preset(self, preset_id: str) -> None: + """删除用户预设 / Delete user preset.""" + self._lab.delete_user_preset(preset_id) + + def create_experiment(self, body: AnalysisExperimentCreate) -> dict[str, Any]: + """保存实验记录 / Save experiment record.""" + return self._lab.create_experiment( + input_name=body.input_name, + preset_label=body.preset_label, + result_json=body.result_json, + metrics=body.metrics, + thumbnail_png_base64=body.thumbnail_png_base64, + replay=body.replay, + save_asset_snapshot=body.save_asset_snapshot, + ) + + def list_experiments( + self, q: str | None, page: int, page_size: int + ) -> dict[str, Any]: + """分页实验列表 / Paginated experiments.""" + return self._lab.list_experiments(q, page, page_size) + + def delete_upload( + self, filename: str, delete_experiments: bool = False + ) -> dict[str, Any]: + """删除素材池文件及 stem.txt 侧车;可选级联实验记录 / Delete pool file and sidecar; optional cascade.""" + path = 
self.resolve_upload_path(filename) + if path.name == "manifest.json": + raise ValueError("不可删除清单文件 / Cannot delete manifest") + if not path.is_file(): + raise FileNotFoundError("上传文件不存在 / Uploaded file not found") + n_exp = 0 + if delete_experiments: + n_exp = self._lab.delete_experiments_for_input(path.name) + path.unlink() + side = self.upload_root / f"{path.stem}.txt" + if side.is_file(): + side.unlink() + self._lab.remove_manifest_entry(path.name) + return {"success": True, "filename": path.name, "deleted_experiments": n_exp} + + def delete_experiment(self, experiment_id: str) -> None: + """删除一条实验记录 / Delete one experiment record.""" + self._lab.delete_experiment(experiment_id) + + def export_experiments(self, fmt: str) -> str: + """导出实验记录 / Export experiments.""" + if fmt == "json": + return self._lab.export_experiments_json() + if fmt == "csv": + return self._lab.export_experiments_csv() + raise ValueError("format 须为 json 或 csv / format must be json or csv") + + async def extract_preview( + self, body: AnalysisExtractPreviewRequest + ) -> dict[str, Any]: + """提星二值掩膜预览(不调 Tetra3 解算)/ Preview binary mask without plate solve.""" + source = self.upload_root / Path(body.input_name).name + if not source.exists(): + raise FileNotFoundError("上传文件不存在 / Uploaded file not found") + centroid_params = self._centroid_params_from_payload(body.centroid) + settings = get_settings() + max_side = ( + body.max_image_side + if body.max_image_side is not None + else settings.solver_max_image_side + ) + if centroid_params is None: + centroid_params = CentroidExtractionParams.from_settings(settings) + + def _run() -> dict[str, Any]: + frame = cv2.imread(str(source), cv2.IMREAD_COLOR) + if frame is None: + raise ValueError("无法读取图片 / Unable to read image") + return centroid_extraction_preview( + frame, + max_stars=self._solver_max_stars, + centroid_params=centroid_params, + max_image_side=int(max_side), + large_scale_bg_subtract=bool(body.large_scale_bg_subtract), + downsample_max_side=int(settings.solver_large_scale_bg_downsample), + ) + + return await asyncio.to_thread(_run) + async def get_job_status(self, job_id: str) -> dict[str, Any]: """获取任务状态 / Get job status""" job = self._jobs.get(job_id) @@ -187,22 +685,75 @@ def _persist_job(self, job: AnalysisJob) -> None: json.dumps(job.to_dict(), ensure_ascii=False, indent=2), encoding="utf-8" ) + def _solve_bgr_to_row( + self, + frame_bgr: Any, + hint_ra_deg: float | None, + hint_dec_deg: float | None, + fov_estimate: float | None = None, + fov_max_error: float | None = None, + solve_timeout_ms: int | None = None, + centroid_params: CentroidExtractionParams | None = None, + max_image_side: int | None = None, + max_stars: int | None = None, + large_scale_bg_subtract: bool = False, + ) -> dict[str, Any]: + """BGR 帧送 Tetra3 解算 / Plate-solve one BGR frame.""" + solved = self.solver.solve_from_bgr_frame( + frame_bgr=frame_bgr, + max_stars=int( + max_stars if max_stars is not None else self._solver_max_stars + ), + hint_ra_deg=( + hint_ra_deg if hint_ra_deg is not None else self.default_hint_ra + ), + hint_dec_deg=( + hint_dec_deg if hint_dec_deg is not None else self.default_hint_dec + ), + solve_source="full", + fov_estimate=fov_estimate, + fov_max_error=fov_max_error, + solve_timeout_ms=solve_timeout_ms, + centroid_params=centroid_params, + max_image_side=max_image_side, + large_scale_bg_subtract=large_scale_bg_subtract, + ) + return {"frame_index": 0, **solved.to_dict()} + def _analyze_image( - self, source: Path, hint_ra_deg: float | None, hint_dec_deg: float | 
None + self, + source: Path, + hint_ra_deg: float | None, + hint_dec_deg: float | None, + fov_estimate: float | None = None, + fov_max_error: float | None = None, + solve_timeout_ms: int | None = None, + centroid_params: CentroidExtractionParams | None = None, + max_image_side: int | None = None, + max_stars: int | None = None, + large_scale_bg_subtract: bool = False, ) -> list[dict[str, Any]]: """分析单图 / Analyze image""" + t_total = time.perf_counter() + t_decode = time.perf_counter() frame = cv2.imread(str(source), cv2.IMREAD_COLOR) + t_open_decode_ms = (time.perf_counter() - t_decode) * 1000.0 if frame is None: raise ValueError("无法读取图片 / Unable to read image") - stars = self.extractor.extract(frame) - solved = self.solver.solve( - stars=stars, - frame_shape=frame.shape, - hint_ra_deg=hint_ra_deg if hint_ra_deg is not None else self.default_hint_ra, - hint_dec_deg=hint_dec_deg if hint_dec_deg is not None else self.default_hint_dec, - solve_source="full", + row = self._solve_bgr_to_row( + frame, + hint_ra_deg, + hint_dec_deg, + fov_estimate=fov_estimate, + fov_max_error=fov_max_error, + solve_timeout_ms=solve_timeout_ms, + centroid_params=centroid_params, + max_image_side=max_image_side, + max_stars=max_stars, + large_scale_bg_subtract=large_scale_bg_subtract, ) - row = {"frame_index": 0, **solved.to_dict()} + row["t_open_decode_ms"] = round(t_open_decode_ms, 3) + row["t_backend_total_ms"] = round((time.perf_counter() - t_total) * 1000.0, 3) return [row] def _analyze_video( @@ -213,6 +764,9 @@ def _analyze_video( frame_step: int, max_frames: int, job: AnalysisJob, + fov_estimate: float | None = None, + fov_max_error: float | None = None, + solve_timeout_ms: int | None = None, ) -> list[dict[str, Any]]: """分析视频 / Analyze video""" cap = cv2.VideoCapture(str(source)) @@ -240,6 +794,9 @@ def _analyze_video( hint_ra_deg=hint_ra, hint_dec_deg=hint_dec, solve_source="full", + fov_estimate=fov_estimate, + fov_max_error=fov_max_error, + solve_timeout_ms=solve_timeout_ms, ) hint_ra = solved.ra_deg hint_dec = solved.dec_deg @@ -251,5 +808,105 @@ def _analyze_video( cap.release() return results + async def solve_video_frame( + self, body: AnalysisSolveVideoFrameRequest + ) -> dict[str, Any]: + """相机或视频文件单帧解算 / Single-frame solve from camera or video file.""" + t_total = time.perf_counter() + t_open_decode_ms = None + frame = None + frame_id = None + frame_ts = None + if body.source == "camera": + from ogscope.web.camera_shared import get_camera_manager + + t_decode = time.perf_counter() + frame, frame_id, frame_ts = await get_camera_manager().get_raw_frame() + t_open_decode_ms = (time.perf_counter() - t_decode) * 1000.0 + else: + if not body.input_name: + raise ValueError( + "需要 input_name / input_name required for file source" + ) + path = self.resolve_upload_path(body.input_name) + if not path.is_file(): + raise FileNotFoundError("上传文件不存在 / Uploaded file not found") + t_decode = time.perf_counter() + cap = cv2.VideoCapture(str(path)) + if not cap.isOpened(): + raise ValueError("无法打开视频 / Cannot open video") + try: + if body.time_sec is not None: + cap.set(cv2.CAP_PROP_POS_MSEC, float(body.time_sec) * 1000.0) + else: + cap.set(cv2.CAP_PROP_POS_FRAMES, float(body.frame_index)) + ok, frame = cap.read() + if not ok or frame is None: + raise ValueError("无法读取视频帧 / Cannot read video frame") + finally: + cap.release() + t_open_decode_ms = (time.perf_counter() - t_decode) * 1000.0 + centroid_params, max_stars, timeout_ms, effective_profile = ( + self._resolve_solve_profile( + body.solve_profile, 
body.centroid, body.solve_timeout_ms + ) + ) + loop = asyncio.get_running_loop() + + def _run() -> dict[str, Any]: + return self._solve_bgr_to_row( + frame, + body.hint_ra_deg, + body.hint_dec_deg, + body.fov_estimate, + body.fov_max_error, + timeout_ms, + centroid_params, + body.max_image_side, + max_stars, + bool(body.large_scale_bg_subtract), + ) + + row = await loop.run_in_executor(self._solver_executor, _run) + if t_open_decode_ms is not None: + row["t_open_decode_ms"] = round(t_open_decode_ms, 3) + row["t_backend_total_ms"] = round((time.perf_counter() - t_total) * 1000.0, 3) + row["solve_profile"] = effective_profile + # 默认精简 raw,大字段仅在 detail_level==full 时返回 / Drop heavy raw unless client asks for full detail. + detail_level = getattr(body, "detail_level", None) or "summary" + if detail_level != "full": + row.pop("tetra", None) + return { + "success": True, + "input_name": body.input_name or "", + "result": row, + "frame_id": frame_id, + "frame_ts": frame_ts, + } + + def lab_public_settings(self) -> dict[str, Any]: + """分析台默认参数(供前端)/ Public defaults for analysis UI.""" + s = get_settings() + return { + "solver_timeout_ms": s.solver_timeout_ms, + "star_analysis_target_fps": s.star_analysis_target_fps, + "camera_width": s.camera_width, + "camera_height": s.camera_height, + "camera_fps": s.camera_fps, + "solver_fov_deg": s.solver_fov_deg, + "solver_max_image_side": s.solver_max_image_side, + "solver_large_scale_bg_downsample": s.solver_large_scale_bg_downsample, + "solve_profile_default": _SOLVE_PROFILE_DEFAULT, + "solve_profiles": list(_SOLVE_PROFILE_OVERRIDES.keys()), + } + + def upload_experiment_count(self, filename: str) -> dict[str, Any]: + """引用该素材的实验条数 / Number of experiments referencing upload.""" + return {"count": self._lab.count_experiments_for_input(filename)} + + def get_experiment_asset_path(self, experiment_id: str) -> Path: + """实验快照路径 / Snapshot path for replay.""" + return self._lab.experiment_asset_path(experiment_id) + analysis_service = AnalysisService() diff --git a/ogscope/web/api/camera/routes.py b/ogscope/web/api/camera/routes.py index f04ad23..e1dad22 100644 --- a/ogscope/web/api/camera/routes.py +++ b/ogscope/web/api/camera/routes.py @@ -2,18 +2,19 @@ 相机相关API路由 / Camera-related API routes 支持真实相机和模拟模式 / Supports real camera and simulation mode """ -from fastapi import APIRouter, HTTPException + +import io +import logging + +from fastapi import APIRouter, HTTPException, Query from fastapi.responses import StreamingResponse -from ogscope.utils.environment import should_use_simulation_mode, get_simulation_config -from ogscope.hardware.camera import create_camera + +from ogscope.utils.environment import get_simulation_config, should_use_simulation_mode from ogscope.utils.virtual_stream import get_virtual_stream -import logging -import io logger = logging.getLogger(__name__) router = APIRouter() -_camera_instance = None _is_streaming = False _simulation_mode = should_use_simulation_mode() @@ -21,8 +22,7 @@ logger.info("检测到非树莓派环境,启用模拟模式") _virtual_stream = get_virtual_stream() else: - logger.info("检测到树莓派环境,使用真实相机") - _camera_instance = None + logger.info("检测到树莓派环境,使用真实相机(与调试/分析共用单例)") @router.get("/camera/status") @@ -35,110 +35,70 @@ async def get_camera_status(): "resolution": [1920, 1080], "fps": 30, "mode": "simulation", - "simulation_config": get_simulation_config() + "simulation_config": get_simulation_config(), } else: - connected = False - streaming = False - width, height, fps = 1920, 1080, 30 try: - global _camera_instance - if _camera_instance is not 
None: - connected = getattr(_camera_instance, "is_initialized", False) - streaming = getattr(_camera_instance, "is_capturing", False) - width = getattr(_camera_instance, "width", width) - height = getattr(_camera_instance, "height", height) - fps = getattr(_camera_instance, "fps", fps) + from ogscope.web.camera_shared import get_camera_manager + + status = await get_camera_manager().status() + info = status.get("info", {}) if isinstance(status, dict) else {} + width = int(info.get("output_width") or info.get("width") or 1920) + height = int(info.get("output_height") or info.get("height") or 1080) + fps = int(info.get("fps") or 30) except Exception as e: logger.error(f"读取相机状态失败: {e}") + status = {"connected": False, "streaming": False} + width, height, fps = 1920, 1080, 30 return { - "connected": bool(connected), - "streaming": bool(streaming), + "connected": bool(status.get("connected")), + "streaming": bool(status.get("streaming")), "resolution": [int(width), int(height)], "fps": int(fps), - "mode": "real" + "mode": "real", + "runtime_overrides": status.get("runtime_overrides", {}), } @router.get("/camera/preview") -async def get_camera_preview(): +async def get_camera_preview(since_frame_id: int | None = Query(default=None)): """获取相机预览图(JPEG) / Get camera preview (JPEG)""" if _simulation_mode: if not _is_streaming: # 返回静态占位符图像 / Return static placeholder image placeholder_image = io.BytesIO() - placeholder_image.write(b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x01\x90\x00\x00\x00\xf0\x08\x02\x00\x00\x00') + placeholder_image.write( + b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x01\x90\x00\x00\x00\xf0\x08\x02\x00\x00\x00" + ) placeholder_image.seek(0) - + return StreamingResponse( placeholder_image, media_type="image/png", - headers={"Cache-Control": "no-cache"} + headers={"Cache-Control": "no-cache"}, ) - + # 生成虚拟视频帧 / Generate virtual video frames try: frame_data = _virtual_stream.generate_frame() return StreamingResponse( io.BytesIO(frame_data), media_type="image/jpeg", - headers={"Cache-Control": "no-cache"} + headers={"Cache-Control": "no-cache"}, ) except Exception as e: logger.error(f"生成虚拟视频帧失败: {e}") raise HTTPException(status_code=500, detail="生成视频帧失败") else: try: - global _camera_instance - # 懒加载初始化与启动,避免前端必须显式调用 start / Lazy loading initialization and startup to avoid the front end having to explicitly call start - if _camera_instance is None or not getattr(_camera_instance, "is_initialized", False): - from ogscope.config import get_settings - settings = get_settings() - cam_cfg = { - "width": getattr(settings, "camera_width", 640), - "height": getattr(settings, "camera_height", 360), - "fps": getattr(settings, "camera_fps", 5), - "exposure_us": getattr(settings, "camera_exposure", 10000), - "analogue_gain": getattr(settings, "camera_gain", 1.0), - "digital_gain": getattr(settings, "camera_digital_gain", 1.0), - "auto_exposure": getattr(settings, "camera_auto_exposure", False), - "auto_gain": getattr(settings, "camera_auto_gain", False), - "rotation": getattr(settings, "camera_rotation", 0), - "sampling_mode": getattr(settings, "camera_sampling_mode", "supersample"), - "type": getattr(settings, "camera_type", "imx327_mipi"), - } - _camera_instance = create_camera(cam_cfg) - if _camera_instance is None: - raise HTTPException(status_code=500, detail="创建相机失败") - if not _camera_instance.initialize(): - raise HTTPException(status_code=500, detail="相机初始化失败") - if not getattr(_camera_instance, "is_capturing", False): - if not _camera_instance.start_capture(): - raise 
HTTPException(status_code=500, detail="相机未能启动") - - # 获取一帧并编码为JPEG / Get a frame and encode to JPEG - frame = _camera_instance.get_video_frame() - if frame is None: - raise HTTPException(status_code=500, detail="无法获取视频帧") - try: - import cv2 - ok, buf = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 85]) - if not ok: - raise RuntimeError("图像编码失败") - data = buf.tobytes() - except Exception as e: - logger.error(f"编码JPEG失败: {e}") - raise HTTPException(status_code=500, detail="编码失败") + # 与调试台共用帧总线;通过 since_frame_id 减少重复 JPEG 下发。 + # Shared frame bus with debug console; use since_frame_id to avoid duplicate payload. + from ogscope.web.api.debug.services import DebugCameraService - return StreamingResponse( - io.BytesIO(data), - media_type="image/jpeg", - headers={"Cache-Control": "no-cache"} - ) + return await DebugCameraService.get_preview(since_frame_id=since_frame_id) except HTTPException: raise except Exception as e: logger.error(f"获取真实相机预览失败: {e}") raise HTTPException(status_code=500, detail="获取预览失败") - diff --git a/ogscope/web/api/catalog/__init__.py b/ogscope/web/api/catalog/__init__.py deleted file mode 100644 index 266ede0..0000000 --- a/ogscope/web/api/catalog/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -星表 API 包 / Catalog API package -""" diff --git a/ogscope/web/api/catalog/routes.py b/ogscope/web/api/catalog/routes.py deleted file mode 100644 index f048400..0000000 --- a/ogscope/web/api/catalog/routes.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -星表管理路由 / Catalog management routes -""" - -from fastapi import APIRouter, HTTPException, Query - -from ogscope.web.api.catalog.services import CatalogApiService -from ogscope.web.api.models.schemas import ( - CatalogBuildIndexRequest, - CatalogDownloadRequest, - CatalogStarUpsertRequest, -) - -router = APIRouter() - - -@router.post("/catalog/download") -async def download_catalog(payload: CatalogDownloadRequest): - """下载星表 / Download catalog""" - try: - return await CatalogApiService.download_catalog( - source=payload.source, - url=payload.url, - magnitude_limit=payload.magnitude_limit, - ) - except Exception as exc: # noqa: BLE001 - raise HTTPException(status_code=400, detail=str(exc)) from exc - - -@router.post("/catalog/build-index") -async def build_catalog_index(payload: CatalogBuildIndexRequest): - """构建索引 / Build index""" - try: - return await CatalogApiService.build_index( - magnitude_limit=payload.magnitude_limit, - ra_bin_size_deg=payload.ra_bin_size_deg, - ) - except Exception as exc: # noqa: BLE001 - raise HTTPException(status_code=400, detail=str(exc)) from exc - - -@router.get("/catalog/status") -async def get_catalog_status(): - """获取状态 / Get status""" - try: - return await CatalogApiService.get_status() - except Exception as exc: # noqa: BLE001 - raise HTTPException(status_code=500, detail=str(exc)) from exc - - -@router.get("/catalog/stars") -async def list_catalog_stars( - limit: int = Query(default=100, ge=1, le=2000), - offset: int = Query(default=0, ge=0), - source_query: str | None = Query(default=None), - min_mag: float | None = Query(default=None), - max_mag: float | None = Query(default=None), -): - """分页查询星点 / List stars""" - try: - return await CatalogApiService.list_stars( - limit=limit, - offset=offset, - source_query=source_query, - min_mag=min_mag, - max_mag=max_mag, - ) - except Exception as exc: # noqa: BLE001 - raise HTTPException(status_code=400, detail=str(exc)) from exc - - -@router.get("/catalog/stars/{source_id}") -async def get_catalog_star(source_id: str): - """读取星点详情 / Get star details""" - 
try: - return await CatalogApiService.get_star(source_id) - except FileNotFoundError as exc: - raise HTTPException(status_code=404, detail=str(exc)) from exc - except Exception as exc: # noqa: BLE001 - raise HTTPException(status_code=400, detail=str(exc)) from exc - - -@router.post("/catalog/stars") -async def create_catalog_star(payload: CatalogStarUpsertRequest): - """新增星点 / Create star""" - try: - return await CatalogApiService.create_star(payload.model_dump()) - except Exception as exc: # noqa: BLE001 - raise HTTPException(status_code=400, detail=str(exc)) from exc - - -@router.put("/catalog/stars/{source_id}") -async def update_catalog_star(source_id: str, payload: CatalogStarUpsertRequest): - """更新星点 / Update star""" - try: - update_payload = payload.model_dump() - update_payload["source_id"] = source_id - return await CatalogApiService.update_star(source_id, update_payload) - except FileNotFoundError as exc: - raise HTTPException(status_code=404, detail=str(exc)) from exc - except Exception as exc: # noqa: BLE001 - raise HTTPException(status_code=400, detail=str(exc)) from exc - - -@router.delete("/catalog/stars/{source_id}") -async def delete_catalog_star(source_id: str): - """删除星点 / Delete star""" - try: - return await CatalogApiService.delete_star(source_id) - except FileNotFoundError as exc: - raise HTTPException(status_code=404, detail=str(exc)) from exc - except Exception as exc: # noqa: BLE001 - raise HTTPException(status_code=400, detail=str(exc)) from exc diff --git a/ogscope/web/api/catalog/services.py b/ogscope/web/api/catalog/services.py deleted file mode 100644 index 86f94df..0000000 --- a/ogscope/web/api/catalog/services.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -星表管理服务 / Catalog management services -""" - -from __future__ import annotations - -from typing import Any - -from ogscope.data.catalog.service import catalog_service - - -class CatalogApiService: - """星表 API 服务 / Catalog API service""" - - @staticmethod - async def download_catalog( - source: str, url: str | None, magnitude_limit: float - ) -> dict[str, Any]: - return catalog_service.download_catalog( - source=source, url=url, magnitude_limit=magnitude_limit - ) - - @staticmethod - async def build_index( - magnitude_limit: float, ra_bin_size_deg: float - ) -> dict[str, Any]: - return catalog_service.build_index( - magnitude_limit=magnitude_limit, ra_bin_size_deg=ra_bin_size_deg - ) - - @staticmethod - async def get_status() -> dict[str, Any]: - return catalog_service.get_status() - - @staticmethod - async def list_stars( - limit: int, - offset: int, - source_query: str | None, - min_mag: float | None, - max_mag: float | None, - ) -> dict[str, Any]: - return catalog_service.list_stars( - limit=limit, - offset=offset, - source_query=source_query, - min_mag=min_mag, - max_mag=max_mag, - ) - - @staticmethod - async def get_star(source_id: str) -> dict[str, Any]: - row = catalog_service.get_star(source_id) - if not row: - raise FileNotFoundError("星点不存在 / Star not found") - return row - - @staticmethod - async def create_star(payload: dict[str, Any]) -> dict[str, Any]: - return catalog_service.create_star(payload) - - @staticmethod - async def update_star(source_id: str, payload: dict[str, Any]) -> dict[str, Any]: - return catalog_service.update_star(source_id, payload) - - @staticmethod - async def delete_star(source_id: str) -> dict[str, Any]: - deleted = catalog_service.delete_star(source_id) - if not deleted: - raise FileNotFoundError("星点不存在 / Star not found") - return {"success": True, "source_id": source_id} diff 
--git a/ogscope/web/api/debug/routes.py b/ogscope/web/api/debug/routes.py index 284cc8b..d81614e 100644 --- a/ogscope/web/api/debug/routes.py +++ b/ogscope/web/api/debug/routes.py @@ -1,22 +1,26 @@ """ 调试控制台API路由 """ + +import asyncio + from fastapi import APIRouter, HTTPException, Query -from fastapi.responses import FileResponse -from fastapi.responses import StreamingResponse -from ogscope.web.api.models.schemas import CameraSettings, CameraPreset +from fastapi.responses import FileResponse, StreamingResponse + +from ogscope.core.realtime import realtime_solve_service from ogscope.web.api.debug.services import ( - DebugCameraService, - DebugPresetService, - DebugFileService + DebugCameraService, + DebugFileService, + DebugPresetService, ) -from ogscope.core.realtime import realtime_solve_service +from ogscope.web.api.models.schemas import CameraPreset, CameraSettings router = APIRouter() # ==================== 相机控制 ==================== / ==================== Camera Control ==================== + @router.get("/debug/camera/status") async def get_debug_camera_status(): """获取调试相机状态 / Get debug camera status""" @@ -26,6 +30,33 @@ async def get_debug_camera_status(): raise HTTPException(status_code=500, detail=str(e)) +@router.get("/debug/camera/runtime-overrides") +async def get_debug_camera_runtime_overrides(): + """获取运行时预览参数覆盖 / Get runtime preview overrides""" + try: + return await DebugCameraService.get_runtime_overrides() + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +@router.post("/debug/camera/runtime-overrides/reset") +async def reset_debug_camera_runtime_overrides(): + """重置运行时预览参数覆盖 / Reset runtime preview overrides""" + try: + return await DebugCameraService.clear_runtime_overrides() + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +@router.post("/debug/camera/runtime-overrides/apply-defaults") +async def apply_debug_camera_runtime_overrides_as_defaults(): + """确认将运行时预览参数写为系统默认 / Apply runtime overrides as system defaults""" + try: + return await DebugCameraService.apply_runtime_overrides_as_defaults() + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + @router.post("/debug/camera/start") async def start_debug_camera(): """启动调试相机 / Start the debug camera""" @@ -39,32 +70,35 @@ async def start_debug_camera(): async def stream_debug_camera(quality: int = Query(70, ge=10, le=100)): """MJPEG 实时流 - 可配置压缩质量 / MJPEG live streaming - configurable compression quality""" try: - from ogscope.web.api.debug.services import DebugCameraService - camera = DebugCameraService.get_camera_instance() - if not camera or not camera.is_capturing: - raise HTTPException(status_code=503, detail="相机未运行") - - import cv2 - import numpy as np - boundary = "frame" async def frame_generator(): + last_frame_id = -1 while True: - frame = camera.get_video_frame() - if frame is None: - break - ok, buf = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, quality]) - if not ok: + code, data, frame_id = await DebugCameraService.get_stream_frame_bytes( + "jpeg", quality + ) + if code != 200 or data is None: + await asyncio.sleep(0.05) + continue + if frame_id == last_frame_id: + await asyncio.sleep(0.03) continue - data = buf.tobytes() + last_frame_id = frame_id yield ( b"--" + boundary.encode() + b"\r\n" b"Content-Type: image/jpeg\r\n" - b"Content-Length: " + str(len(data)).encode() + b"\r\n\r\n" + data + b"\r\n" + b"Content-Length: " + + str(len(data)).encode() + + b"\r\n\r\n" + + data + + b"\r\n" ) - return 
StreamingResponse(frame_generator(), media_type=f"multipart/x-mixed-replace; boundary={boundary}") + return StreamingResponse( + frame_generator(), + media_type=f"multipart/x-mixed-replace; boundary={boundary}", + ) except HTTPException: raise except Exception as e: @@ -75,32 +109,35 @@ async def frame_generator(): async def stream_debug_camera_lossless(): """无损质量实时流 - 使用PNG格式展示超采样效果 / Lossless quality live streaming - using PNG format to demonstrate supersampling effects""" try: - from ogscope.web.api.debug.services import DebugCameraService - camera = DebugCameraService.get_camera_instance() - if not camera or not camera.is_capturing: - raise HTTPException(status_code=503, detail="相机未运行") - - import cv2 - import numpy as np - boundary = "frame" async def frame_generator(): + last_frame_id = -1 while True: - frame = camera.get_video_frame() - if frame is None: - break - ok, buf = cv2.imencode('.png', frame) - if not ok: + code, data, frame_id = await DebugCameraService.get_stream_frame_bytes( + "png", 100 + ) + if code != 200 or data is None: + await asyncio.sleep(0.05) + continue + if frame_id == last_frame_id: + await asyncio.sleep(0.03) continue - data = buf.tobytes() + last_frame_id = frame_id yield ( b"--" + boundary.encode() + b"\r\n" b"Content-Type: image/png\r\n" - b"Content-Length: " + str(len(data)).encode() + b"\r\n\r\n" + data + b"\r\n" + b"Content-Length: " + + str(len(data)).encode() + + b"\r\n\r\n" + + data + + b"\r\n" ) - return StreamingResponse(frame_generator(), media_type=f"multipart/x-mixed-replace; boundary={boundary}") + return StreamingResponse( + frame_generator(), + media_type=f"multipart/x-mixed-replace; boundary={boundary}", + ) except HTTPException: raise except Exception as e: @@ -121,6 +158,7 @@ async def set_camera_rotation(rotation: int): """设置相机旋转角度 / Set camera rotation angle""" try: from ogscope.web.api.debug.services import DebugCameraService + result = await DebugCameraService.set_rotation(rotation) return result except Exception as e: @@ -128,10 +166,10 @@ async def set_camera_rotation(rotation: int): @router.get("/debug/camera/preview") -async def get_debug_camera_preview(): +async def get_debug_camera_preview(since_frame_id: int | None = Query(default=None)): """获取调试相机预览 / Get debug camera preview""" try: - return await DebugCameraService.get_preview() + return await DebugCameraService.get_preview(since_frame_id=since_frame_id) except Exception as e: raise HTTPException(status_code=500, detail=str(e)) @@ -164,10 +202,13 @@ async def stop_debug_recording(): @router.post("/debug/camera/size") -async def set_camera_size(width: int = Query(..., gt=0), height: int = Query(..., gt=0)): +async def set_camera_size( + width: int = Query(..., gt=0), height: int = Query(..., gt=0) +): """仅切换分辨率(宽高),不影响当前帧率;必要时重启预览 / Only switches the resolution (width and height) and does not affect the current frame rate; restart the preview if necessary""" try: from ogscope.web.api.debug.services import DebugCameraService + result = await DebugCameraService.set_size(width, height) return result except Exception as e: @@ -175,10 +216,13 @@ async def set_camera_size(width: int = Query(..., gt=0), height: int = Query(... 
@router.post("/debug/camera/sampling") -async def set_camera_sampling_mode(mode: str = Query(..., pattern="^(supersample|native|crop)$")): +async def set_camera_sampling_mode( + mode: str = Query(..., pattern="^(supersample|native|crop)$") +): """设置采样模式:supersample | native | crop""" try: from ogscope.web.api.debug.services import DebugCameraService + return await DebugCameraService.set_sampling_mode(mode) except Exception as e: raise HTTPException(status_code=500, detail=str(e)) @@ -189,10 +233,12 @@ async def set_camera_fps(fps: int = Query(..., gt=0)): """仅设置帧率,尽量不影响当前预览 / Only set the frame rate and try not to affect the current preview""" try: from ogscope.web.api.debug.services import DebugCameraService + return await DebugCameraService.set_fps(fps) except Exception as e: raise HTTPException(status_code=500, detail=str(e)) + @router.post("/debug/camera/settings") async def update_debug_camera_settings(settings: CameraSettings): """更新调试相机设置 / Update debug camera settings""" @@ -287,10 +333,9 @@ async def set_camera_white_balance( raise HTTPException(status_code=500, detail=str(e)) - - # ==================== 预设管理 ==================== / ==================== Default Management ==================== + @router.get("/debug/camera/presets") async def get_camera_presets(): """获取相机预设列表 / Get a list of camera presets""" @@ -329,6 +374,7 @@ async def delete_camera_preset(preset_name: str): # ==================== 文件管理 ==================== / ==================== File Management ==================== + @router.get("/debug/files") async def get_capture_files(): """获取拍摄文件列表 / Get shooting file list""" @@ -342,16 +388,15 @@ async def get_capture_files(): async def download_capture_file(filename: str): """下载拍摄文件 / Download shooting files""" from pathlib import Path + DEBUG_CAPTURES_DIR = Path.home() / "dev_captures" file_path = DEBUG_CAPTURES_DIR / filename - + if not file_path.exists(): raise HTTPException(status_code=404, detail="文件不存在") - + return FileResponse( - path=str(file_path), - filename=filename, - media_type="application/octet-stream" + path=str(file_path), filename=filename, media_type="application/octet-stream" ) @@ -380,11 +425,18 @@ async def delete_capture_file(filename: str): async def start_realtime_solving( hint_ra_deg: float | None = Query(default=None), hint_dec_deg: float | None = Query(default=None), + fov_estimate: float | None = Query(default=None), + fov_max_error: float | None = Query(default=None), + solve_timeout_ms: int | None = Query(default=None), ): """启动实时解算 / Start realtime solving""" try: return await realtime_solve_service.start( - hint_ra_deg=hint_ra_deg, hint_dec_deg=hint_dec_deg + hint_ra_deg=hint_ra_deg, + hint_dec_deg=hint_dec_deg, + fov_estimate=fov_estimate, + fov_max_error=fov_max_error, + solve_timeout_ms=solve_timeout_ms, ) except Exception as e: raise HTTPException(status_code=500, detail=str(e)) diff --git a/ogscope/web/api/debug/services.py b/ogscope/web/api/debug/services.py index 34deb69..45da7c1 100644 --- a/ogscope/web/api/debug/services.py +++ b/ogscope/web/api/debug/services.py @@ -1,12 +1,18 @@ """ 调试控制台服务层 """ -import os -import json + import asyncio +import json +import logging +import os +import time +from concurrent.futures import ThreadPoolExecutor from datetime import datetime from pathlib import Path -from typing import Optional, Dict, Any, List +from typing import Any, Optional + +from ogscope.web.camera_shared import get_camera_manager # 调试控制台相关 / Debug console related DEBUG_CAPTURES_DIR = Path.home() / "dev_captures" @@ -16,16 +22,40 @@ 
camera_instance = None is_recording = False recording_task = None +# 录制会话元数据(用于停止时写入侧车) / Recording session metadata (for sidecar on stop) +recording_stem: Optional[str] = None +recording_t0_mono: Optional[float] = None +recording_fps_value: float = 15.0 +recording_media_filename: Optional[str] = None +recording_codec_fourcc: str = "mp4v" +recording_container: str = "MP4" # 预览帧缓存与抓取任务 / Preview frame buffering and grabbing tasks latest_preview_jpeg: Optional[bytes] = None last_preview_time: Optional[float] = None latest_preview_id: int = 0 preview_grabber_task = None +PREVIEW_JPEG_QUALITY = int(os.getenv("OGSCOPE_PREVIEW_JPEG_QUALITY", "75")) +PREVIEW_PIPELINE_WORKERS = 2 +_CAMERA_ENV_KEY_MAP = { + "width": "OGSCOPE_CAMERA_WIDTH", + "height": "OGSCOPE_CAMERA_HEIGHT", + "fps": "OGSCOPE_CAMERA_FPS", + "sampling_mode": "OGSCOPE_CAMERA_SAMPLING_MODE", + "exposure_us": "OGSCOPE_CAMERA_EXPOSURE", + "analogue_gain": "OGSCOPE_CAMERA_GAIN", +} -def i18n_payload(message_key: str, message: str, message_params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: - payload: Dict[str, Any] = { +# 串行化 ensure/start,避免并发 to_thread 竞争;与阻塞相机调用分离出事件循环 +# Serialize ensure/start; offload blocking camera calls from asyncio event loop. +_camera_ensure_lock = asyncio.Lock() + + +def i18n_payload( + message_key: str, message: str, message_params: Optional[dict[str, Any]] = None +) -> dict[str, Any]: + payload: dict[str, Any] = { "message_key": message_key, "message": message, } @@ -34,296 +64,572 @@ def i18n_payload(message_key: str, message: str, message_params: Optional[Dict[s return payload +def _persist_env_updates(updates: dict[str, Any]) -> Path: + """将键值写入项目 .env(存在则覆盖,不存在则追加)/ Persist key-values into project .env.""" + env_path = Path.cwd() / ".env" + if env_path.exists(): + lines = env_path.read_text(encoding="utf-8").splitlines() + else: + lines = [] + + pending = {str(k): str(v) for k, v in updates.items()} + new_lines: list[str] = [] + for line in lines: + stripped = line.strip() + if not stripped or stripped.startswith("#") or "=" not in line: + new_lines.append(line) + continue + key, _, _ = line.partition("=") + key = key.strip() + if key in pending: + new_lines.append(f"{key}={pending.pop(key)}") + else: + new_lines.append(line) + for key, value in pending.items(): + new_lines.append(f"{key}={value}") + env_path.write_text("\n".join(new_lines) + "\n", encoding="utf-8") + return env_path + + def get_camera_instance(): """获取相机实例 / Get camera instance""" - global camera_instance - if camera_instance is None: - from ogscope.hardware.camera import create_camera - from ogscope.config import get_settings - - settings = get_settings() - config = { - "type": "imx327_mipi", - "width": settings.camera_width, - "height": settings.camera_height, - "fps": 5, # 调试控制台默认使用 5fps(用户未指定时) / The debug console uses 5fps by default (when not specified by the user) - "exposure_us": settings.camera_exposure, - "analogue_gain": settings.camera_gain, - "auto_exposure": True, # 调试控制台默认自动曝光优先 / The debugging console defaults to automatic exposure priority. 
- "rotation": 180, # 默认180度旋转 / Default 180 degree rotation - "sampling_mode": getattr(settings, "camera_sampling_mode", "native"), - # 新增参数 / New parameters - "noise_reduction": 0, - "white_balance_mode": "auto", - "white_balance_gain_r": 1.0, - "white_balance_gain_b": 1.0, - "contrast": 1.0, - "brightness": 0.0, - "saturation": 1.0, - "sharpness": 1.0, - "night_mode": False, - "color_mode": "color", # 默认彩色模式 / Default color mode - } - - camera_instance = create_camera(config) - if camera_instance and not camera_instance.initialize(): - camera_instance = None - - return camera_instance - - -def generate_filename(prefix: str = "IMG") -> str: - """生成文件名 / Generate file name""" - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - return f"{prefix}_{timestamp}" - - -def save_capture_info(filename: str, camera_params: Dict[str, Any], file_size: int): - """保存拍摄信息到txt文件 / Save shooting information to txt file""" - info_file = DEBUG_CAPTURES_DIR / f"{filename}.txt" - - info_data = { - "filename": filename, - "timestamp": datetime.now().isoformat(), - "exposure_us": camera_params.get("exposure_us", 0), - "analogue_gain": camera_params.get("analogue_gain", 1.0), - "digital_gain": camera_params.get("digital_gain", 1.0), - "resolution": f"{camera_params.get('width', 1920)}x{camera_params.get('height', 1080)}", - "file_size": file_size, - "camera_type": camera_params.get("type", "imx327_mipi"), - "fps": camera_params.get("fps", 15) + manager = get_camera_manager() + return manager.get_camera_instance() + + +def _attach_manager_camera_if_needed(camera: Any) -> None: + """将兼容层返回的相机实例挂到共享管理器(测试与旧代码兼容)/ Attach compat camera to shared manager.""" + manager = get_camera_manager() + if camera is not None and manager.get_camera_instance() is None: + manager.attach_camera_instance(camera) + + +def _capture_timestamp_for_stem() -> str: + """生成带毫秒的时间戳,降低同秒碰撞 / Timestamp with milliseconds to reduce same-second collisions""" + dt = datetime.now() + return dt.strftime("%Y%m%d_%H%M%S") + f"_{dt.microsecond // 1000:03d}" + + +def _to_json_safe(value: Any) -> Any: + """将嵌套结构转为可 JSON 序列化的类型 / Convert nested structures to JSON-serializable types""" + if isinstance(value, dict): + return {str(k): _to_json_safe(v) for k, v in value.items()} + if isinstance(value, (list, tuple)): + return [_to_json_safe(v) for v in value] + if isinstance(value, (str, int, float, bool)) or value is None: + return value + return str(value) + + +def build_param_slug(camera_info: dict[str, Any]) -> str: + """从相机信息生成简短、文件名安全的参数片段 / Short filesystem-safe param slug from camera info""" + if not camera_info: + return "" + parts: list[str] = [] + exp = camera_info.get("exposure_us") + if exp is not None: + try: + parts.append(f"e{int(exp)}us") + except (TypeError, ValueError): + pass + ag = camera_info.get("analogue_gain") + if ag is not None: + try: + parts.append(f"ag{float(ag):.1f}".replace(".", "p")) + except (TypeError, ValueError): + pass + dg = camera_info.get("digital_gain") + if dg is not None: + try: + if abs(float(dg) - 1.0) > 0.01: + parts.append(f"dg{float(dg):.1f}".replace(".", "p")) + except (TypeError, ValueError): + pass + fps = camera_info.get("fps") + if fps is not None: + try: + parts.append(f"{float(fps):g}fps") + except (TypeError, ValueError): + pass + sm = camera_info.get("sampling_mode") + if sm and str(sm) != "native": + parts.append(str(sm)[:24]) + ow = camera_info.get("output_width") or camera_info.get("width") + oh = camera_info.get("output_height") or camera_info.get("height") + if ow and oh: + try: + 
parts.append(f"{int(ow)}x{int(oh)}") + except (TypeError, ValueError): + pass + slug = "_".join(parts) + for bad in '<>:"/\\|?*': + slug = slug.replace(bad, "-") + return slug[:120] + + +def generate_capture_stem(prefix: str, camera_info: dict[str, Any]) -> str: + """生成带参数摘要的文件名主干(无扩展名)/ File stem (no extension) with param summary""" + ts = _capture_timestamp_for_stem() + slug = build_param_slug(camera_info) + if slug: + return f"{prefix}_{ts}_{slug}" + return f"{prefix}_{ts}" + + +def save_capture_sidecar( + stem: str, + camera_params: dict[str, Any], + *, + kind: str, + media_filename: str, + file_size: int, + extra: Optional[dict[str, Any]] = None, +) -> None: + """将完整拍摄/录制参数写入同名 .txt 侧车 / Write full capture params to sidecar .txt file""" + info_file = DEBUG_CAPTURES_DIR / f"{stem}.txt" + payload: dict[str, Any] = { + "kind": kind, + "media_file": media_filename, + "sidecar_version": 2, + "created_at": datetime.now().isoformat(), + "file_size_bytes": file_size, + "camera": _to_json_safe(camera_params), } - - with open(info_file, 'w', encoding='utf-8') as f: - json.dump(info_data, f, indent=2, ensure_ascii=False) + if extra: + payload["extra"] = _to_json_safe(extra) + with open(info_file, "w", encoding="utf-8") as f: + json.dump(payload, f, indent=2, ensure_ascii=False) class DebugCameraService: """调试相机服务 / Debug camera service""" - + @staticmethod def get_camera_instance(): """提供给路由的获取实例入口(兼容 routes 中的调用) / Obtain instance entry provided for routing (compatible with calls in routes)""" return globals()["get_camera_instance"]() - + @staticmethod async def get_camera_status(): """获取调试相机状态 / Get debug camera status""" - camera = get_camera_instance() - if not camera: + camera = await asyncio.to_thread(get_camera_instance) + _attach_manager_camera_if_needed(camera) + status = await get_camera_manager().status() + if not status.get("connected"): return { "connected": False, "streaming": False, "recording": is_recording, - "error": "相机未初始化" + "error": "相机未初始化", } - return { - "connected": camera.is_initialized, - "streaming": camera.is_capturing, + "connected": bool(status.get("connected")), + "streaming": bool(status.get("streaming")), "recording": is_recording, - "info": camera.get_camera_info() + "info": status.get("info", {}), + "runtime_overrides": status.get("runtime_overrides", {}), } - + + @staticmethod + async def get_runtime_overrides(): + """获取运行时预览覆盖参数 / Get runtime preview overrides.""" + manager = get_camera_manager() + return {"runtime_overrides": manager.get_runtime_overrides()} + + @staticmethod + async def clear_runtime_overrides(): + """清空运行时预览覆盖参数 / Clear runtime preview overrides.""" + manager = get_camera_manager() + manager.clear_runtime_overrides() + return { + "success": True, + **i18n_payload( + "server.runtimeOverridesCleared", + "运行时预览参数已清空", + ), + } + + @staticmethod + async def apply_runtime_overrides_as_defaults(): + """将运行时覆盖参数确认写入系统默认 .env / Persist runtime overrides to .env defaults.""" + manager = get_camera_manager() + overrides = manager.get_runtime_overrides() + if not overrides: + return { + "success": True, + "applied": {}, + "skipped": {}, + **i18n_payload( + "server.runtimeOverridesEmpty", + "当前没有待确认的运行时参数", + ), + } + applied: dict[str, Any] = {} + skipped: dict[str, Any] = {} + for key, value in overrides.items(): + env_key = _CAMERA_ENV_KEY_MAP.get(key) + if env_key: + applied[env_key] = value + else: + skipped[key] = value + env_path = None + if applied: + env_path = _persist_env_updates(applied) + return { + "success": True, + "applied": 
applied, + "skipped": skipped, + "env_path": str(env_path) if env_path else None, + **i18n_payload( + "server.runtimeOverridesAppliedAsDefaults", + "运行时参数已写入系统默认配置", + ), + } + @staticmethod async def start_camera(): """启动调试相机 / Start the debug camera""" - camera = get_camera_instance() - if not camera: - raise Exception("相机初始化失败") - - if camera.start_capture(): - # 启动后台抓取任务 / Start background crawling task - await DebugCameraService._ensure_preview_grabber() - return {"success": True, **i18n_payload("server.cameraStarted", "相机启动成功")} - else: - raise Exception("相机启动失败") - + camera = await asyncio.to_thread(get_camera_instance) + _attach_manager_camera_if_needed(camera) + await get_camera_manager().ensure_started() + return {"success": True, **i18n_payload("server.cameraStarted", "相机启动成功")} + + @staticmethod + async def ensure_camera_streaming(): + """确保相机已采集并刷新预览(分析台与 /api/camera 共用单例,避免重复打开设备)/ Ensure capture + preview; shared singleton for lab and /api/camera.""" + camera = await asyncio.to_thread(get_camera_instance) + _attach_manager_camera_if_needed(camera) + await get_camera_manager().ensure_started() + @staticmethod async def stop_camera(): """停止调试相机 / Stop debugging camera""" - camera = get_camera_instance() - if not camera: - return {"success": True, **i18n_payload("server.cameraNotRunning", "相机未运行")} - - if camera.stop_capture(): - await DebugCameraService._stop_preview_grabber() - return {"success": True, **i18n_payload("server.cameraStopped", "相机停止成功")} - else: - raise Exception("相机停止失败") - + camera = await asyncio.to_thread(get_camera_instance) + _attach_manager_camera_if_needed(camera) + await get_camera_manager().stop() + return {"success": True, **i18n_payload("server.cameraStopped", "相机停止成功")} + @staticmethod - async def get_preview(): + async def get_preview(since_frame_id: int | None = None): """获取调试相机预览 / Get debug camera preview""" - camera = get_camera_instance() - if not camera or not camera.is_capturing: - raise Exception("相机未运行") - - try: - # 若后台抓取未运行,尝试启动一次 / If background crawling is not running, try to start it once - await DebugCameraService._ensure_preview_grabber() - - # 等待最多500ms 以获取缓存帧 / Wait up to 500ms for cached frames - import time - deadline = time.time() + 0.5 - global latest_preview_jpeg, latest_preview_id, last_preview_time - while latest_preview_jpeg is None and time.time() < deadline: - await asyncio.sleep(0.01) - if latest_preview_jpeg is None: + from fastapi.responses import Response + + manager = get_camera_manager() + code, frame = await manager.get_preview_frame(since_frame_id) + if code == 304: + return Response(status_code=304) + if code != 200 or frame is None or frame.jpeg_frame is None: + # 首帧兜底:直接抓一帧并编码,避免前端启动后长时间黑屏 + # First-frame fallback: grab one frame immediately to avoid prolonged black screen. 
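# --- Editor's aside (illustrative sketch, not part of the patch): a minimal client
# for the `since_frame_id` contract used above -- send back the last `X-Frame-Id`,
# treat 304 as "no new frame", and only read the body on 200. The URL is an
# assumption for illustration; point it at wherever this router is actually mounted.

import time

import requests

PREVIEW_URL = "http://127.0.0.1:8000/api/debug/camera/preview"  # assumed mount point


def poll_preview(duration_s: float = 5.0) -> None:
    """Poll the preview endpoint, downloading JPEG bytes only when the frame id advances."""
    last_id: int | None = None
    deadline = time.monotonic() + duration_s
    while time.monotonic() < deadline:
        params = {"since_frame_id": last_id} if last_id is not None else {}
        resp = requests.get(PREVIEW_URL, params=params, timeout=5)
        if resp.status_code == 200:
            last_id = int(resp.headers.get("X-Frame-Id", "0"))
            print(f"frame {last_id}: {len(resp.content)} bytes")
        elif resp.status_code == 304:
            pass  # unchanged since last_id; nothing new to download
        time.sleep(0.2)

# --- end of aside; the patch hunk continues below.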
+ raw, frame_id, frame_ts = await manager.get_raw_frame() + jpeg = await asyncio.to_thread(manager.encode_frame, raw, "jpeg", 75) + if jpeg is None: raise Exception("暂无预览帧") - from fastapi.responses import Response return Response( - content=latest_preview_jpeg, + content=jpeg, media_type="image/jpeg", headers={ "Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", - "X-Frame-Id": str(latest_preview_id), - "X-Frame-Ts": str(last_preview_time or 0.0), + "X-Frame-Id": str(frame_id), + "X-Frame-Ts": str(frame_ts), }, ) - except Exception as e: - raise Exception(f"预览失败: {str(e)}") - + return Response( + content=frame.jpeg_frame, + media_type="image/jpeg", + headers={ + "Cache-Control": "no-cache, no-store, must-revalidate", + "Pragma": "no-cache", + "X-Frame-Id": str(frame.frame_id), + "X-Frame-Ts": str(frame.timestamp), + "X-Frame-Width": str(frame.width), + "X-Frame-Height": str(frame.height), + }, + ) + + @staticmethod + async def get_stream_frame_bytes( + image_format: str = "jpeg", quality: int = 75 + ) -> tuple[int, bytes | None, int]: + """读取共享流帧并编码 / Read shared frame and encode.""" + manager = get_camera_manager() + await manager.ensure_started() + snap = await manager.get_cached_frame_snapshot() + if snap is None or snap.raw_frame is None: + return 503, None, 0 + if image_format.lower() == "jpeg" and snap.jpeg_frame is not None: + return 200, snap.jpeg_frame, snap.frame_id + encoded = await asyncio.to_thread( + manager.encode_frame, snap.raw_frame, image_format, int(quality) + ) + if encoded is None: + return 500, None, snap.frame_id + return 200, encoded, snap.frame_id + @staticmethod async def capture_image(): """拍摄单张图片 / Take a single picture""" camera = get_camera_instance() if not camera or not camera.is_capturing: raise Exception("相机未运行") - + try: import cv2 - + # 捕获图像 / capture image image = camera.capture_image() if image is None: raise Exception("图像捕获失败") - - # 生成文件名 / Generate file name - filename = generate_filename("IMG") - image_path = DEBUG_CAPTURES_DIR / f"{filename}.jpg" - + + camera_info = camera.get_camera_info() + expected_w = int( + camera_info.get("output_width", camera_info.get("width", 0)) or 0 + ) + expected_h = int( + camera_info.get("output_height", camera_info.get("height", 0)) or 0 + ) + actual_h, actual_w = image.shape[:2] + rotation = int(camera_info.get("rotation", 0) or 0) + if rotation in (90, 270): + expected_w, expected_h = expected_h, expected_w + if ( + expected_w > 0 + and expected_h > 0 + and (int(actual_w) != expected_w or int(actual_h) != expected_h) + ): + raise Exception( + f"拍照分辨率与当前设置不一致: expected={expected_w}x{expected_h}, actual={actual_w}x{actual_h}" + ) + + # 生成文件名(含参数摘要)/ File name with param summary in stem + stem = generate_capture_stem("IMG", camera_info) + image_path = DEBUG_CAPTURES_DIR / f"{stem}.jpg" + # 保存图像 / save image success = cv2.imwrite(str(image_path), image) if not success: raise Exception("图像保存失败") - - # 保存拍摄信息 / Save shooting information - camera_info = camera.get_camera_info() + + # 保存拍摄信息侧车 / Save capture sidecar (.txt) file_size = image_path.stat().st_size - save_capture_info(filename, camera_info, file_size) - + save_capture_sidecar( + stem, + camera_info, + kind="photo", + media_filename=f"{stem}.jpg", + file_size=file_size, + extra={ + "actual_saved_width": int(actual_w), + "actual_saved_height": int(actual_h), + "expected_output_width": int(expected_w), + "expected_output_height": int(expected_h), + }, + ) + return { "success": True, - "filename": f"{filename}.jpg", + "filename": 
f"{stem}.jpg", "path": str(image_path), - "size": file_size + "size": file_size, + "actual_saved_width": int(actual_w), + "actual_saved_height": int(actual_h), + "expected_output_width": int(expected_w), + "expected_output_height": int(expected_h), } - + except ImportError: raise Exception("OpenCV未安装") except Exception as e: raise Exception(f"拍摄失败: {str(e)}") - + @staticmethod async def set_rotation(rotation: int): """设置图像旋转角度 / Set image rotation angle""" camera = get_camera_instance() if not camera: raise Exception("相机未初始化") - + if camera.set_rotation(rotation): + get_camera_manager().update_runtime_overrides({"rotation": int(rotation)}) return { "success": True, **i18n_payload( "server.rotationSet", f"旋转角度设置为: {rotation}度", - {"rotation": rotation} - ) + {"rotation": rotation}, + ), } else: raise Exception("设置旋转角度失败") - + @staticmethod async def start_recording(): """开始录制视频 / Start recording video""" - global is_recording, recording_task - + global is_recording, recording_task, recording_stem, recording_t0_mono, recording_fps_value + global recording_media_filename, recording_codec_fourcc, recording_container + if is_recording: raise Exception("已在录制中") - + camera = get_camera_instance() if not camera or not camera.is_capturing: raise Exception("相机未运行") - + try: + import time + import cv2 - import numpy as np - - filename = generate_filename("VID") - video_path = DEBUG_CAPTURES_DIR / f"{filename}.avi" - - # 创建视频写入器(MJPG / Create video writer (MJPG - fourcc = cv2.VideoWriter_fourcc(*'MJPG') + camera_info = camera.get_camera_info() - width = camera_info.get('width', 1920) - height = camera_info.get('height', 1080) - fps = camera_info.get('fps', 15) - - video_writer = cv2.VideoWriter(str(video_path), fourcc, fps, (width, height)) - - if not video_writer.isOpened(): - raise Exception("视频写入器创建失败") - + stem = generate_capture_stem("VID", camera_info) + video_path = DEBUG_CAPTURES_DIR / f"{stem}.mp4" + + # 优先使用 MP4 编码,按候选顺序探测可用编码器 / Prefer MP4 codecs and probe available codecs in order. + # 浏览器兼容优先级:H264/avc1 通常优于 mp4v + # Browser compatibility priority: H264/avc1 are generally more compatible than mp4v. 
+ codec_candidates = [ + ("avc1", "MP4"), + ("H264", "MP4"), + ("mp4v", "MP4"), + ] + width = int(camera_info.get("output_width", camera_info.get("width", 1920))) + height = int( + camera_info.get("output_height", camera_info.get("height", 1080)) + ) + fps = float(camera_info.get("fps", 15)) + recording_fps_value = fps + + video_writer = None + chosen_codec = None + chosen_container = None + for codec_tag, container in codec_candidates: + fourcc = cv2.VideoWriter_fourcc(*codec_tag) + candidate_writer = cv2.VideoWriter( + str(video_path), fourcc, fps, (width, height) + ) + if candidate_writer.isOpened(): + video_writer = candidate_writer + chosen_codec = codec_tag + chosen_container = container + break + candidate_writer.release() + + if video_writer is None or not video_writer.isOpened(): + raise Exception("视频写入器创建失败(MP4编码器不可用)") + if str(chosen_codec or "").lower() == "mp4v": + logging.getLogger(__name__).warning( + "录制回退到 mp4v,某些浏览器可能无法预览该 MP4 文件" + ) + + recording_stem = stem + recording_t0_mono = time.monotonic() + recording_media_filename = f"{stem}.mp4" + recording_codec_fourcc = str(chosen_codec or "mp4v") + recording_container = str(chosen_container or "MP4") is_recording = True - + # 启动录制任务 / Start recording task async def record_video(): nonlocal video_writer try: while is_recording: - image = camera.capture_image() + # 采集单帧放到线程,避免阻塞事件循环影响“停止录制”响应 / Offload frame capture to a thread to keep stop-recording responsive. + image = await asyncio.to_thread(camera.capture_image) if image is not None: # OpenCV 期望 BGR / OpenCV expects BGR try: import cv2 + bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) except Exception: bgr = image video_writer.write(bgr) - await asyncio.sleep(1/max(fps,1)) + await asyncio.sleep(1 / max(fps, 1)) finally: video_writer.release() - + recording_task = asyncio.create_task(record_video()) - - return { - "success": True, - "filename": f"{filename}.avi", - "path": str(video_path) - } - + + return {"success": True, "filename": f"{stem}.mp4", "path": str(video_path)} + except ImportError: raise Exception("OpenCV未安装") except Exception as e: raise Exception(f"录制启动失败: {str(e)}") - + @staticmethod async def stop_recording(): """停止录制视频 / Stop recording video""" - global is_recording, recording_task - + global is_recording, recording_task, recording_stem, recording_t0_mono, recording_fps_value + global recording_media_filename, recording_codec_fourcc, recording_container + if not is_recording: raise Exception("未在录制中") - + + import time + + stem = recording_stem + t0 = recording_t0_mono + nominal_fps = recording_fps_value + media_filename = recording_media_filename + codec_fourcc = recording_codec_fourcc + container = recording_container + is_recording = False - + if recording_task: - await recording_task + try: + # 最多等待短时间优雅结束,避免停止录制长时间卡住 / Wait briefly for graceful stop to avoid long stop-recording stalls. + await asyncio.wait_for(recording_task, timeout=2.0) + except asyncio.TimeoutError: + # 超时后主动取消任务,确保接口快速返回 / Cancel on timeout so API can return quickly. 
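# --- Editor's aside (illustrative sketch, not part of the patch): the stop path
# below is the usual "short grace period, then cancel" shutdown for a background
# asyncio task. The same pattern in isolation, with a toy worker in place of the
# recording loop:

import asyncio


async def stop_task_gracefully(task: asyncio.Task, grace_s: float = 2.0) -> None:
    """Wait briefly for the task to finish, then cancel it so the caller returns fast."""
    try:
        await asyncio.wait_for(task, timeout=grace_s)
    except asyncio.TimeoutError:
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass


async def _demo() -> None:
    async def worker() -> None:
        while True:
            await asyncio.sleep(0.1)

    await stop_task_gracefully(asyncio.create_task(worker()), grace_s=0.3)


# asyncio.run(_demo())
# --- end of aside; the patch hunk continues below.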
+ recording_task.cancel() + try: + await asyncio.wait_for(recording_task, timeout=1.0) + except asyncio.CancelledError: + pass + except Exception as e: + logging.getLogger(__name__).warning( + "停止录制时等待录制任务取消异常: %s", e + ) + except asyncio.CancelledError: + pass recording_task = None - - return {"success": True, **i18n_payload("server.recordingStopped", "录制已停止")} + + # 写入录制参数侧车(与视频同名 .txt)/ Write recording sidecar (.txt) next to video file + if stem: + media_filename = media_filename or f"{stem}.mp4" + video_path = DEBUG_CAPTURES_DIR / media_filename + duration_s = 0.0 + if t0 is not None: + duration_s = max(0.0, time.monotonic() - t0) + file_size = int(video_path.stat().st_size) if video_path.exists() else 0 + camera = get_camera_instance() + camera_info = camera.get_camera_info() if camera else {} + save_capture_sidecar( + stem, + camera_info, + kind="video", + media_filename=media_filename, + file_size=file_size, + extra={ + "duration_s": round(duration_s, 3), + "nominal_fps": nominal_fps, + "codec_fourcc": codec_fourcc, + "container": container, + }, + ) + recording_stem = None + recording_t0_mono = None + recording_media_filename = None + recording_codec_fourcc = "mp4v" + recording_container = "MP4" + + return { + "success": True, + **i18n_payload("server.recordingStopped", "录制已停止"), + } @staticmethod async def set_size(width: int, height: int): @@ -331,66 +637,67 @@ async def set_size(width: int, height: int): camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + # 验证输入参数 / Validate input parameters if width <= 0 or height <= 0: raise Exception("分辨率参数无效") - + # 检查当前分辨率是否相同 / Check if the current resolutions are the same info = camera.get_camera_info() - current_width = info.get('output_width', info.get('width', 0)) - current_height = info.get('output_height', info.get('height', 0)) - + current_width = info.get("output_width", info.get("width", 0)) + current_height = info.get("output_height", info.get("height", 0)) + if current_width == width and current_height == height: - return {"success": True, "info": info, **i18n_payload("server.resolutionUnchanged", "分辨率未变化")} - - # 为避免在预览抓取进行中重配导致底层冲突:先停抓取,再设置,最后重启抓取 / To avoid underlying conflicts caused by reconfiguration while preview crawling is in progress: stop crawling first, then set up, and finally restart crawling. + return { + "success": True, + "info": info, + **i18n_payload("server.resolutionUnchanged", "分辨率未变化"), + } + try: - await DebugCameraService._stop_preview_grabber() - - # 设置超时,避免卡死 / Set timeout to avoid stuck - import asyncio - success = await asyncio.wait_for( - asyncio.get_event_loop().run_in_executor( - None, camera.set_resolution, int(width), int(height) - ), - timeout=10.0 # 10秒超时 / 10 seconds timeout + success = await get_camera_manager().reconfigure_camera( + "set_resolution", + lambda: camera.set_resolution(int(width), int(height)), + timeout_sec=10.0, ) - if not success: raise Exception("相机设置分辨率失败") - except asyncio.TimeoutError: raise Exception("设置分辨率超时,请重试") except Exception as e: - # 出错也尽量恢复抓取器 / Try to restore the crawler if something goes wrong. 
- try: - await DebugCameraService._ensure_preview_grabber() - except Exception: - pass raise Exception(f"设置分辨率失败: {str(e)}") # 校验是否已生效(以相机报告的尺寸为准) / Verify whether the verification has taken effect (subject to the size reported by the camera) info = camera.get_camera_info() # 在supersample模式下,检查output_width和output_height / In supersample mode, check output_width and output_height - if info.get('sampling_mode') == 'supersample': - applied = (int(info.get('output_width', 0)) == int(width) and int(info.get('output_height', 0)) == int(height)) + if info.get("sampling_mode") == "supersample": + applied = int(info.get("output_width", 0)) == int(width) and int( + info.get("output_height", 0) + ) == int(height) else: - applied = (int(info.get('width', 0)) == int(width) and int(info.get('height', 0)) == int(height)) - + applied = int(info.get("width", 0)) == int(width) and int( + info.get("height", 0) + ) == int(height) + if not applied: # 如果设置未生效,记录警告但不抛出异常 / If the setting does not take effect, log a warning but do not throw an exception current_res = f"{info.get('width', 0)}x{info.get('height', 0)}" - if info.get('sampling_mode') == 'supersample': - current_res = f"{info.get('output_width', 0)}x{info.get('output_height', 0)}" - print(f"警告: 分辨率设置可能未完全生效,当前分辨率: {current_res}") + if info.get("sampling_mode") == "supersample": + current_res = ( + f"{info.get('output_width', 0)}x{info.get('output_height', 0)}" + ) + logging.getLogger(__name__).warning( + f"分辨率设置可能未完全生效,当前分辨率: {current_res}" + ) - # 分辨率调整后尝试重启抓取器(失败不影响返回) / Try to restart the crawler after adjusting the resolution (failure does not affect return) - try: - await DebugCameraService._restart_preview_grabber() - except Exception: - pass - return {"success": True, "info": info, **i18n_payload("server.resolutionUpdated", "分辨率已更新")} + get_camera_manager().update_runtime_overrides( + {"width": int(width), "height": int(height)} + ) + return { + "success": True, + "info": info, + **i18n_payload("server.resolutionUpdated", "分辨率已更新"), + } @staticmethod async def set_sampling_mode(mode: str): @@ -398,35 +705,33 @@ async def set_sampling_mode(mode: str): camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + # 验证输入参数 / Validate input parameters - if mode not in ['supersample', 'native', 'crop']: + if mode not in ["supersample", "native", "crop"]: raise Exception(f"不支持的采样模式: {mode}") - - # 避免与预览抓取竞争:先停抓取 / Avoid competing with preview crawling: stop crawling first + try: - await DebugCameraService._stop_preview_grabber() - ok = camera.set_sampling_mode(mode) + ok = await get_camera_manager().reconfigure_camera( + "set_sampling_mode", + lambda: camera.set_sampling_mode(mode), + timeout_sec=10.0, + ) if not ok: raise Exception("相机设置采样模式失败") except Exception as e: - try: - await DebugCameraService._ensure_preview_grabber() - except Exception: - pass raise Exception(f"设置采样模式失败: {str(e)}") - + # 验证设置是否生效 / Verify whether the settings take effect info = camera.get_camera_info() - current_mode = info.get('sampling_mode', 'unknown') + current_mode = info.get("sampling_mode", "unknown") requested_mode = mode if requested_mode == "supersample" and current_mode == "native": # 在高分辨率场景下会自动降级为 native,这是预期行为 / In high-resolution scenarios, it is expected to automatically downgrade to native. 
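+                # Assumed reason (not verified here): at the largest output sizes the sensor has no
+                # headroom left to oversample, so silently falling back to native is the expected path.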
pass elif current_mode != requested_mode: raise Exception(f"采样模式设置未生效,当前模式: {current_mode}") - - await DebugCameraService._restart_preview_grabber() + + get_camera_manager().update_runtime_overrides({"sampling_mode": mode}) return { "success": True, "info": info, @@ -436,7 +741,7 @@ async def set_sampling_mode(mode: str): "server.samplingModeSet", f"采样模式请求为 {requested_mode},实际生效为 {current_mode}", {"requested_mode": requested_mode, "effective_mode": current_mode}, - ) + ), } @staticmethod @@ -445,51 +750,70 @@ async def set_fps(fps: int): camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + # 验证输入参数 / Validate input parameters if fps <= 0 or fps > 60: raise Exception(f"帧率参数无效: {fps} (应在1-60之间)") - + try: ok = False - # 优先热更新帧率(同步调用,避免执行器上下文问题) / Prioritize hot update frame rate (synchronous call to avoid executor context issues) - if hasattr(camera, 'set_fps'): - ok = camera.set_fps(int(fps)) + if hasattr(camera, "set_fps"): + ok = await get_camera_manager().reconfigure_camera( + "set_fps", + lambda: camera.set_fps(int(fps)), + timeout_sec=10.0, + ) else: - # 兼容旧实现:通过 set_resolution 传入 fps / Compatible with old implementation: pass in fps through set_resolution info = camera.get_camera_info() - # 为避免竞争,切换前停抓取 / To avoid competition, stop crawling before switching - await DebugCameraService._stop_preview_grabber() - ok = camera.set_resolution(info.get('width', 640), info.get('height', 360), int(fps)) + ok = await get_camera_manager().reconfigure_camera( + "set_fps_by_set_resolution", + lambda: camera.set_resolution( + info.get("width", 640), info.get("height", 360), int(fps) + ), + timeout_sec=10.0, + ) if not ok: raise Exception("相机设置帧率失败") - + # 验证设置是否生效 / Verify whether the settings take effect info = camera.get_camera_info() - current_fps = info.get('fps', 0) + current_fps = info.get("fps", 0) if current_fps != int(fps): # 如果设置未生效,尝试重新设置一次 / If the setting does not take effect, try setting it again try: - if hasattr(camera, 'set_fps'): - ok = camera.set_fps(int(fps)) + if hasattr(camera, "set_fps"): + ok = await get_camera_manager().reconfigure_camera( + "retry_set_fps", + lambda: camera.set_fps(int(fps)), + timeout_sec=10.0, + ) else: - ok = camera.set_resolution(info.get('width', 640), info.get('height', 360), int(fps)) + ok = await get_camera_manager().reconfigure_camera( + "retry_set_fps_by_set_resolution", + lambda: camera.set_resolution( + info.get("width", 640), + info.get("height", 360), + int(fps), + ), + timeout_sec=10.0, + ) if ok: info = camera.get_camera_info() - current_fps = info.get('fps', 0) + current_fps = info.get("fps", 0) except Exception: pass - + if current_fps != int(fps): raise Exception(f"帧率设置未生效,当前帧率: {current_fps}") - - # 帧率变化后,预览抓取节流需要同步 / After the frame rate changes, preview capture throttling needs to be synchronized - await DebugCameraService._restart_preview_grabber() + + get_camera_manager().update_runtime_overrides({"fps": int(fps)}) return { "success": True, "info": info, - **i18n_payload("server.fpsSet", f"帧率设置为 {int(fps)}", {"fps": int(fps)}) + **i18n_payload( + "server.fpsSet", f"帧率设置为 {int(fps)}", {"fps": int(fps)} + ), } except Exception as e: raise Exception(f"设置帧率失败: {str(e)}") @@ -497,33 +821,39 @@ async def set_fps(fps: int): # ==================== 内部:预览抓取器 ==================== / ==================== Internal: Preview Grabber ==================== @staticmethod async def _ensure_preview_grabber(): - global preview_grabber_task - if preview_grabber_task and not preview_grabber_task.done(): - 
return - preview_grabber_task = asyncio.create_task(DebugCameraService._preview_grabber_loop()) + await get_camera_manager().resume_grabber() @staticmethod async def _stop_preview_grabber(): - global preview_grabber_task - if preview_grabber_task: - preview_grabber_task.cancel() - try: - # 添加超时机制,避免无限等待 / Add a timeout mechanism to avoid infinite waiting - await asyncio.wait_for(preview_grabber_task, timeout=2.0) - except asyncio.TimeoutError: - # 超时后强制取消 / Forced cancellation after timeout - preview_grabber_task.cancel() - except asyncio.CancelledError: - # 任务被取消是正常的,不需要处理 / It is normal for the task to be canceled and does not need to be processed. - pass - except Exception: - pass - preview_grabber_task = None + await get_camera_manager().pause_grabber() @staticmethod async def _restart_preview_grabber(): - await DebugCameraService._stop_preview_grabber() - await DebugCameraService._ensure_preview_grabber() + await get_camera_manager().pause_grabber() + await get_camera_manager().resume_grabber() + + @staticmethod + def _capture_preview_frame(camera): + """抓取预览帧(线程池执行) / Capture preview frame (run in thread pool)""" + try: + return camera.get_video_frame() + except Exception: + return None + + @staticmethod + def _encode_preview_jpeg(image, quality: int) -> Optional[bytes]: + """编码 JPEG(线程池执行) / Encode JPEG (run in thread pool)""" + try: + import cv2 + + ok, buf = cv2.imencode( + ".jpg", image, [cv2.IMWRITE_JPEG_QUALITY, int(quality)] + ) + if not ok: + return None + return buf.tobytes() + except Exception: + return None @staticmethod async def _preview_grabber_loop(): @@ -532,24 +862,35 @@ async def _preview_grabber_loop(): camera = get_camera_instance() if not camera or not camera.is_capturing: return - import cv2 - import time - target_fps = max(1, int(camera.get_camera_info().get('fps', 5))) + target_fps = max(1, int(camera.get_camera_info().get("fps", 5))) interval = 1.0 / target_fps + loop = asyncio.get_running_loop() + # 使用双工人流水线:一个抓帧,一个编码,提升 Zero2W 下实时预览稳定性 / Use a two-worker pipeline: one captures frames and one encodes, improving preview stability on Zero2W. 
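+        # The loop below submits the next capture before encoding the current frame, so with two
+        # workers frame capture and JPEG encoding overlap instead of running back-to-back.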
+ executor = ThreadPoolExecutor( + max_workers=PREVIEW_PIPELINE_WORKERS, thread_name_prefix="preview-pipe" + ) try: + capture_future = loop.run_in_executor( + executor, DebugCameraService._capture_preview_frame, camera + ) while True: start = time.time() - try: - image = camera.get_video_frame() - if image is not None: - ok, buf = cv2.imencode('.jpg', image, [cv2.IMWRITE_JPEG_QUALITY, 85]) - if ok: - latest_preview_jpeg = buf.tobytes() - last_preview_time = time.time() - latest_preview_id += 1 - except Exception: - # 忽略单帧失败 / Ignore single frame failures - pass + image = await asyncio.wrap_future(capture_future) + # 先提交下一帧抓取,让抓取与编码并行 / Submit next frame capture first to overlap capture and encoding + capture_future = loop.run_in_executor( + executor, DebugCameraService._capture_preview_frame, camera + ) + if image is not None: + jpeg_bytes = await loop.run_in_executor( + executor, + DebugCameraService._encode_preview_jpeg, + image, + PREVIEW_JPEG_QUALITY, + ) + if jpeg_bytes is not None: + latest_preview_jpeg = jpeg_bytes + last_preview_time = time.time() + latest_preview_id += 1 # 按 fps 节流 / Throttle by fps spent = time.time() - start await asyncio.sleep(max(0.0, interval - spent)) @@ -558,8 +899,9 @@ async def _preview_grabber_loop(): raise except Exception as e: # 记录其他异常 / Log other exceptions - import logging logging.getLogger(__name__).error(f"预览抓取器异常: {e}") + finally: + executor.shutdown(wait=False, cancel_futures=True) @staticmethod async def set_auto_exposure_mode(enabled: bool): @@ -568,12 +910,13 @@ async def set_auto_exposure_mode(enabled: bool): if not camera or not camera.is_initialized: raise Exception("相机未初始化") - if not hasattr(camera, 'set_auto_exposure'): + if not hasattr(camera, "set_auto_exposure"): raise Exception("当前相机不支持自动曝光切换") if not camera.set_auto_exposure(bool(enabled)): raise Exception("设置自动曝光模式失败") + get_camera_manager().update_runtime_overrides({"auto_exposure": bool(enabled)}) return { "success": True, **i18n_payload("server.autoExposureUpdated", "曝光模式已更新"), @@ -581,157 +924,219 @@ async def set_auto_exposure_mode(enabled: bool): } @staticmethod - async def update_settings(settings: Dict[str, Any]): + async def update_settings(settings: dict[str, Any]): """更新调试相机设置 / Update debug camera settings""" camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + try: # 优先处理自动曝光开关,避免自动 / Prioritize the automatic exposure switch to avoid automatic - auto_exposure = settings.get("autoExposure", getattr(camera, "auto_exposure", False)) - if hasattr(camera, 'set_auto_exposure'): + auto_exposure = settings.get( + "autoExposure", getattr(camera, "auto_exposure", False) + ) + if hasattr(camera, "set_auto_exposure"): camera.set_auto_exposure(bool(auto_exposure)) # 更新基础相机参数 / Update basic camera parameters if not auto_exposure and "exposure" in settings: camera.set_exposure(settings["exposure"]) - + if not auto_exposure and "gain" in settings and "digitalGain" in settings: camera.set_gain(settings["gain"], settings.get("digitalGain", 1.0)) elif not auto_exposure and "gain" in settings: camera.set_gain(settings["gain"]) - + # 更新图像增强参数 / Update image enhancement parameters - if any(key in settings for key in ["contrast", "brightness", "saturation", "sharpness"]): + if any( + key in settings + for key in ["contrast", "brightness", "saturation", "sharpness"] + ): contrast = settings.get("contrast", 1.0) brightness = settings.get("brightness", 0.0) saturation = settings.get("saturation", 1.0) sharpness = settings.get("sharpness", 1.0) 
- - if hasattr(camera, 'set_image_enhancement'): - camera.set_image_enhancement(contrast, brightness, saturation, sharpness) - + + if hasattr(camera, "set_image_enhancement"): + camera.set_image_enhancement( + contrast, brightness, saturation, sharpness + ) + # 更新降噪设置 / Update noise reduction settings if "noiseReduction" in settings: - if hasattr(camera, 'set_noise_reduction'): + if hasattr(camera, "set_noise_reduction"): camera.set_noise_reduction(settings["noiseReduction"]) - + # 更新白平衡设置 / Update white balance settings if "whiteBalanceMode" in settings: mode = settings["whiteBalanceMode"] gain_r = settings.get("whiteBalanceGainR", 1.0) gain_b = settings.get("whiteBalanceGainB", 1.0) - - if hasattr(camera, 'set_white_balance'): + + if hasattr(camera, "set_white_balance"): camera.set_white_balance(mode, gain_r, gain_b) - + # 更新颜色模式设置 / Update color mode settings if "colorMode" in settings: - if hasattr(camera, 'set_color_mode'): - camera.set_color_mode(settings["colorMode"]) - + if hasattr(camera, "set_color_mode"): + await get_camera_manager().reconfigure_camera( + "update_color_mode", + lambda: camera.set_color_mode(settings["colorMode"]), + timeout_sec=10.0, + ) + + overrides: dict[str, Any] = {} + if "exposure" in settings: + overrides["exposure_us"] = settings["exposure"] + if "gain" in settings: + overrides["analogue_gain"] = settings["gain"] + if "digitalGain" in settings: + overrides["digital_gain"] = settings["digitalGain"] + if "autoExposure" in settings: + overrides["auto_exposure"] = bool(settings["autoExposure"]) + if "colorMode" in settings: + overrides["color_mode"] = settings["colorMode"] + if overrides: + get_camera_manager().update_runtime_overrides(overrides) + return { "success": True, **i18n_payload("server.cameraSettingsUpdated", "相机设置已更新"), - "settings": settings + "settings": settings, } except Exception as e: raise Exception(f"更新设置失败: {str(e)}") - + @staticmethod async def reset_camera(): """重置相机到默认设置 / Reset camera to default settings""" from ogscope.config import get_settings - + settings = get_settings() camera = get_camera_instance() - + if camera and camera.is_initialized: camera.set_exposure(settings.camera_exposure) camera.set_gain(settings.camera_gain) - + return { "success": True, - **i18n_payload("server.cameraReset", "相机已重置到默认设置") + **i18n_payload("server.cameraReset", "相机已重置到默认设置"), } - + @staticmethod async def get_image_quality(): """获取图像质量指标 / Get image quality metrics""" - camera = get_camera_instance() - if not camera or not camera.is_initialized: - raise Exception("相机未初始化") - + # 仅使用当前已存在实例,不触发懒初始化,避免后台轮询造成反复 acquire 冲突 + # Use existing instance only; avoid lazy init from background polling. + camera = get_camera_manager().get_camera_instance() + if camera is None: + # 测试环境兼容:允许使用 monkeypatch 注入的相机实例 + # Test compatibility: allow monkeypatched injected camera instance. 
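+        # Falling through to the available=False payload below (rather than raising) lets background
+        # quality polling degrade quietly while the camera is not running.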
+ try: + camera = get_camera_instance() + _attach_manager_camera_if_needed(camera) + except Exception: + camera = None + if not camera or not getattr(camera, "is_initialized", False): + return { + "success": False, + "available": False, + "quality": { + "noise_level": 0.0, + "exposure_adequacy": 0.0, + "gain_level": 0.0, + }, + **i18n_payload("server.cameraNotRunning", "相机未运行"), + } quality_metrics = camera.get_image_quality_metrics() - return {"success": True, "quality": quality_metrics} - + return {"success": True, "available": True, "quality": quality_metrics} + @staticmethod async def set_noise_reduction(level: int): """设置降噪级别 / Set noise reduction level""" camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + if camera.set_noise_reduction(level): return { "success": True, - **i18n_payload("server.noiseReductionSet", f"降噪级别设置为: {level}", {"level": level}) + **i18n_payload( + "server.noiseReductionSet", + f"降噪级别设置为: {level}", + {"level": level}, + ), } else: raise Exception("设置降噪级别失败") - + @staticmethod async def set_white_balance(mode: str, gain_r: float = 1.0, gain_b: float = 1.0): """设置白平衡 / Set white balance""" camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + if camera.set_white_balance(mode, gain_r, gain_b): return { "success": True, - **i18n_payload("server.whiteBalanceSet", f"白平衡模式设置为: {mode}", {"mode": mode}) + **i18n_payload( + "server.whiteBalanceSet", + f"白平衡模式设置为: {mode}", + {"mode": mode}, + ), } else: raise Exception("设置白平衡失败") - + @staticmethod - async def set_image_enhancement(contrast: float = 1.0, brightness: float = 0.0, - saturation: float = 1.0, sharpness: float = 1.0): + async def set_image_enhancement( + contrast: float = 1.0, + brightness: float = 0.0, + saturation: float = 1.0, + sharpness: float = 1.0, + ): """设置图像增强参数 / Set image enhancement parameters""" camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + if camera.set_image_enhancement(contrast, brightness, saturation, sharpness): - return {"success": True, **i18n_payload("server.imageEnhancementSet", "图像增强参数已设置")} + return { + "success": True, + **i18n_payload("server.imageEnhancementSet", "图像增强参数已设置"), + } else: raise Exception("设置图像增强参数失败") - + @staticmethod async def set_night_mode(enabled: bool): """设置夜间模式 / Set night mode""" camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + if camera.set_night_mode(enabled): mode_text = "启用" if enabled else "关闭" return { "success": True, - **i18n_payload("server.nightModeSet", f"夜间模式已{mode_text}", {"state": mode_text}) + **i18n_payload( + "server.nightModeSet", + f"夜间模式已{mode_text}", + {"state": mode_text}, + ), } else: raise Exception("设置夜间模式失败") - + @staticmethod async def apply_night_mode_preset(): """应用夜间模式预设 / Apply night mode preset""" camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + try: # 夜间模式预设参数 / Night mode preset parameters night_preset = { @@ -744,9 +1149,9 @@ async def apply_night_mode_preset(): "brightness": 0.1, "saturation": 0.8, "sharpness": 1.1, - "night_mode": True + "night_mode": True, } - + # 应用预设 / Apply preset camera.set_exposure(night_preset["exposure_us"]) camera.set_gain(night_preset["analogue_gain"], night_preset["digital_gain"]) @@ -756,60 +1161,60 @@ async def apply_night_mode_preset(): night_preset["contrast"], night_preset["brightness"], night_preset["saturation"], - 
night_preset["sharpness"] + night_preset["sharpness"], ) camera.set_night_mode(night_preset["night_mode"]) - + return { "success": True, "preset": night_preset, - **i18n_payload("server.nightPresetApplied", "夜间模式预设已应用") + **i18n_payload("server.nightPresetApplied", "夜间模式预设已应用"), } except Exception as e: raise Exception(f"应用夜间模式预设失败: {str(e)}") - + @staticmethod async def save_current_settings_backup(): """保存当前设置作为备份 / Save current settings as backup""" camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + try: backup_data = { "timestamp": datetime.now().isoformat(), - "settings": camera.get_camera_info() + "settings": camera.get_camera_info(), } - + backup_file = DEBUG_CAPTURES_DIR / "settings_backup.json" - with open(backup_file, 'w', encoding='utf-8') as f: + with open(backup_file, "w", encoding="utf-8") as f: json.dump(backup_data, f, indent=2, ensure_ascii=False) - + return { "success": True, "backup_file": str(backup_file), - **i18n_payload("server.settingsBackedUp", "当前设置已备份") + **i18n_payload("server.settingsBackedUp", "当前设置已备份"), } except Exception as e: raise Exception(f"保存设置备份失败: {str(e)}") - + @staticmethod async def restore_settings_backup(): """从备份恢复设置 / Restore settings from backup""" camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - + try: backup_file = DEBUG_CAPTURES_DIR / "settings_backup.json" if not backup_file.exists(): raise Exception("未找到设置备份文件") - - with open(backup_file, 'r', encoding='utf-8') as f: + + with open(backup_file, encoding="utf-8") as f: backup_data = json.load(f) - + settings = backup_data.get("settings", {}) - + # 恢复设置 / Restore settings if "exposure_us" in settings: camera.set_exposure(settings["exposure_us"]) @@ -824,38 +1229,48 @@ async def restore_settings_backup(): settings.get("contrast", 1.0), settings.get("brightness", 0.0), settings.get("saturation", 1.0), - settings.get("sharpness", 1.0) + settings.get("sharpness", 1.0), ) if "night_mode" in settings: camera.set_night_mode(settings["night_mode"]) - - return {"success": True, **i18n_payload("server.settingsRestored", "设置已从备份恢复")} + + return { + "success": True, + **i18n_payload("server.settingsRestored", "设置已从备份恢复"), + } except Exception as e: raise Exception(f"恢复设置备份失败: {str(e)}") - + @staticmethod async def set_color_mode(color_mode: str): """设置颜色模式 / Set color mode""" camera = get_camera_instance() if not camera or not camera.is_initialized: raise Exception("相机未初始化") - - if color_mode not in ['color', 'mono']: + + if color_mode not in ["color", "mono"]: raise Exception("不支持的颜色模式,只支持 'color' 或 'mono'") - + try: - if hasattr(camera, 'set_color_mode'): - success = camera.set_color_mode(color_mode) + if hasattr(camera, "set_color_mode"): + success = await get_camera_manager().reconfigure_camera( + "set_color_mode", + lambda: camera.set_color_mode(color_mode), + timeout_sec=10.0, + ) if success: + get_camera_manager().update_runtime_overrides( + {"color_mode": color_mode} + ) mode_name = "彩色" if color_mode == "color" else "黑白" return { - "success": True, + "success": True, **i18n_payload( "server.colorModeSwitched", f"颜色模式已切换为{mode_name}模式", - {"mode": mode_name} + {"mode": mode_name}, ), - "color_mode": color_mode + "color_mode": color_mode, } else: raise Exception("相机不支持颜色模式切换") @@ -867,37 +1282,37 @@ async def set_color_mode(color_mode: str): class DebugPresetService: """调试预设服务 / Debug default service""" - + @staticmethod async def get_presets(): """获取相机预设列表 / Get a list of camera 
presets""" presets_file = DEBUG_CAPTURES_DIR / "presets.json" - + if not presets_file.exists(): return {"presets": []} - + try: - with open(presets_file, 'r', encoding='utf-8') as f: + with open(presets_file, encoding="utf-8") as f: data = json.load(f) return {"presets": data.get("presets", [])} except Exception as e: raise Exception(f"读取预设失败: {str(e)}") - + @staticmethod - async def save_preset(preset_data: Dict[str, Any]): + async def save_preset(preset_data: dict[str, Any]): """保存相机预设 / Save camera presets""" presets_file = DEBUG_CAPTURES_DIR / "presets.json" - + # 读取现有预设 / Read existing preset presets = [] if presets_file.exists(): try: - with open(presets_file, 'r', encoding='utf-8') as f: + with open(presets_file, encoding="utf-8") as f: data = json.load(f) presets = data.get("presets", []) - except: + except Exception: presets = [] - + # 检查是否已存在同名预设 / Check if a preset with the same name already exists for i, existing_preset in enumerate(presets): if existing_preset["name"] == preset_data["name"]: @@ -908,213 +1323,295 @@ async def save_preset(preset_data: Dict[str, Any]): if len(presets) >= 10: raise Exception("预设数量已达上限(10个)") presets.append(preset_data) - + # 保存预设 / save preset try: - with open(presets_file, 'w', encoding='utf-8') as f: + with open(presets_file, "w", encoding="utf-8") as f: json.dump({"presets": presets}, f, indent=2, ensure_ascii=False) - - return {"success": True, **i18n_payload("server.presetSaved", "预设保存成功")} + + return { + "success": True, + **i18n_payload("server.presetSaved", "预设保存成功"), + } except Exception as e: raise Exception(f"保存预设失败: {str(e)}") - + @staticmethod async def apply_preset(preset_name: str): """应用相机预设 / Apply camera presets""" presets_file = DEBUG_CAPTURES_DIR / "presets.json" - + if not presets_file.exists(): raise Exception("预设文件不存在") - + try: - with open(presets_file, 'r', encoding='utf-8') as f: + with open(presets_file, encoding="utf-8") as f: data = json.load(f) presets = data.get("presets", []) - + # 查找预设 / Find a preset preset = None for p in presets: if p["name"] == preset_name: preset = p break - + if not preset: raise Exception("预设不存在") - + # 应用预设到相机 / Apply preset to camera camera = get_camera_instance() if camera and camera.is_initialized: # 自动曝光优先,避免手动参数与AE冲突 / Automatic exposure priority to avoid conflicts between manual parameters and AE auto_exposure = preset.get("auto_exposure", False) - if hasattr(camera, 'set_auto_exposure'): + if hasattr(camera, "set_auto_exposure"): camera.set_auto_exposure(auto_exposure) # 基础参数 / Basic parameters if not auto_exposure: camera.set_exposure(preset["exposure_us"]) - camera.set_gain(preset["analogue_gain"], preset.get("digital_gain", 1.0)) - + camera.set_gain( + preset["analogue_gain"], preset.get("digital_gain", 1.0) + ) + # 图像增强参数 / Image enhancement parameters - if any(key in preset for key in ["contrast", "brightness", "saturation", "sharpness"]): + if any( + key in preset + for key in ["contrast", "brightness", "saturation", "sharpness"] + ): contrast = preset.get("contrast", 1.0) brightness = preset.get("brightness", 0.0) saturation = preset.get("saturation", 1.0) sharpness = preset.get("sharpness", 1.0) - - if hasattr(camera, 'set_image_enhancement'): - camera.set_image_enhancement(contrast, brightness, saturation, sharpness) - + + if hasattr(camera, "set_image_enhancement"): + camera.set_image_enhancement( + contrast, brightness, saturation, sharpness + ) + # 高级参数 / Advanced parameters if "noise_reduction" in preset: - if hasattr(camera, 'set_noise_reduction'): + if hasattr(camera, 
"set_noise_reduction"): camera.set_noise_reduction(preset["noise_reduction"]) - + # 白平衡设置 / White balance settings if "white_balance_mode" in preset: mode = preset["white_balance_mode"] gain_r = preset.get("white_balance_gain_r", 1.0) gain_b = preset.get("white_balance_gain_b", 1.0) - - if hasattr(camera, 'set_white_balance'): + + if hasattr(camera, "set_white_balance"): camera.set_white_balance(mode, gain_r, gain_b) - + # 旋转角度 / rotation angle if "rotation" in preset: - if hasattr(camera, 'set_rotation'): + if hasattr(camera, "set_rotation"): camera.set_rotation(preset["rotation"]) - + # 颜色模式 / color mode if "color_mode" in preset: - if hasattr(camera, 'set_color_mode'): + if hasattr(camera, "set_color_mode"): camera.set_color_mode(preset["color_mode"]) - + return { "success": True, "preset": preset, - **i18n_payload("server.presetApplied", f"预设 '{preset_name}' 已应用", {"name": preset_name}) + **i18n_payload( + "server.presetApplied", + f"预设 '{preset_name}' 已应用", + {"name": preset_name}, + ), } - + except Exception as e: raise Exception(f"应用预设失败: {str(e)}") - + @staticmethod async def delete_preset(preset_name: str): """删除相机预设 / Delete camera preset""" presets_file = DEBUG_CAPTURES_DIR / "presets.json" - + if not presets_file.exists(): raise Exception("预设文件不存在") - + try: - with open(presets_file, 'r', encoding='utf-8') as f: + with open(presets_file, encoding="utf-8") as f: data = json.load(f) presets = data.get("presets", []) - + # 删除预设 / Delete preset original_count = len(presets) presets = [p for p in presets if p["name"] != preset_name] - + if len(presets) == original_count: raise Exception("预设不存在") - + # 保存更新后的预设 / Save updated preset - with open(presets_file, 'w', encoding='utf-8') as f: + with open(presets_file, "w", encoding="utf-8") as f: json.dump({"presets": presets}, f, indent=2, ensure_ascii=False) - - return {"success": True, **i18n_payload("server.presetDeleted", f"预设 '{preset_name}' 已删除", {"name": preset_name})} - + + return { + "success": True, + **i18n_payload( + "server.presetDeleted", + f"预设 '{preset_name}' 已删除", + {"name": preset_name}, + ), + } + except Exception as e: raise Exception(f"删除预设失败: {str(e)}") - class DebugFileService: """调试文件服务 / Debug file service""" - + @staticmethod async def get_files(): """获取拍摄文件列表 / Get shooting file list""" try: # 支持的图片格式 / Supported image formats - image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif', '.webp'} + image_extensions = { + ".jpg", + ".jpeg", + ".png", + ".bmp", + ".tiff", + ".tif", + ".webp", + } # 支持的视频格式 / Supported video formats - video_extensions = {'.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv', '.webm', '.m4v'} - + video_extensions = { + ".mp4", + ".avi", + ".mov", + ".mkv", + ".wmv", + ".flv", + ".webm", + ".m4v", + } + files = [] for file_path in DEBUG_CAPTURES_DIR.iterdir(): if file_path.is_file(): suffix = file_path.suffix.lower() if suffix in image_extensions or suffix in video_extensions: - files.append({ - "name": file_path.name, - "size": file_path.stat().st_size, - "modified": datetime.fromtimestamp(file_path.stat().st_mtime).isoformat(), - "type": "image" if suffix in image_extensions else "video" - }) - + files.append( + { + "name": file_path.name, + "size": file_path.stat().st_size, + "modified": datetime.fromtimestamp( + file_path.stat().st_mtime + ).isoformat(), + "type": ( + "image" if suffix in image_extensions else "video" + ), + } + ) + # 按修改时间排序(最新的在前) / Sort by modification time (newest first) files.sort(key=lambda x: x["modified"], reverse=True) - + return {"files": files} - + 
except Exception as e: raise Exception(f"获取文件列表失败: {str(e)}") - + @staticmethod async def get_file_info(filename: str): """获取文件信息 / Get file information""" file_path = DEBUG_CAPTURES_DIR / filename info_path = DEBUG_CAPTURES_DIR / f"{file_path.stem}.txt" - + if not file_path.exists(): raise Exception("文件不存在") - + try: # 支持的图片格式 / Supported image formats - image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif', '.webp'} + image_extensions = { + ".jpg", + ".jpeg", + ".png", + ".bmp", + ".tiff", + ".tif", + ".webp", + } # 支持的视频格式 / Supported video formats - video_extensions = {'.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv', '.webm', '.m4v'} - + suffix = file_path.suffix.lower() file_type = "image" if suffix in image_extensions else "video" - + info = { "filename": filename, "size": file_path.stat().st_size, - "modified": datetime.fromtimestamp(file_path.stat().st_mtime).isoformat(), - "type": file_type + "modified": datetime.fromtimestamp( + file_path.stat().st_mtime + ).isoformat(), + "type": file_type, } - - # 读取拍摄信息 / Read shooting information + + # 读取拍摄信息;将 camera 内字段展开到顶层以兼容前端详情 / Read sidecar; flatten camera for UI if info_path.exists(): - with open(info_path, 'r', encoding='utf-8') as f: + with open(info_path, encoding="utf-8") as f: capture_info = json.load(f) + if isinstance(capture_info, dict): + cam = capture_info.get("camera") + if isinstance(cam, dict): + for k in ( + "exposure_us", + "analogue_gain", + "digital_gain", + "fps", + "auto_exposure", + "rotation", + "sampling_mode", + "color_mode", + "sensor", + "resolution", + ): + if k not in capture_info and k in cam: + capture_info[k] = cam[k] + if capture_info.get("resolution") is None: + ow = cam.get("output_width") or cam.get("width") + oh = cam.get("output_height") or cam.get("height") + if ow and oh: + capture_info["resolution"] = f"{ow}x{oh}" + extra = capture_info.get("extra") + if isinstance(extra, dict): + for k, v in extra.items(): + if k not in capture_info: + capture_info[k] = v info.update(capture_info) - + return info - + except Exception as e: raise Exception(f"获取文件信息失败: {str(e)}") - + @staticmethod async def delete_file(filename: str): """删除文件 / Delete files""" try: file_path = DEBUG_CAPTURES_DIR / filename info_path = DEBUG_CAPTURES_DIR / f"{file_path.stem}.txt" - + if not file_path.exists(): raise Exception("文件不存在") - + # 删除主文件 / Delete master file file_path.unlink() - + # 删除对应的参数文件(如果存在) / Delete the corresponding parameter file (if it exists) if info_path.exists(): info_path.unlink() - - return i18n_payload("server.fileDeleted", f"文件 {filename} 删除成功", {"filename": filename}) - + + return i18n_payload( + "server.fileDeleted", + f"文件 {filename} 删除成功", + {"filename": filename}, + ) + except Exception as e: raise Exception(f"删除文件失败: {str(e)}") - diff --git a/ogscope/web/api/main.py b/ogscope/web/api/main.py index 79be18e..91b23f4 100644 --- a/ogscope/web/api/main.py +++ b/ogscope/web/api/main.py @@ -2,13 +2,14 @@ OGScope Web API 主路由 整合所有API模块 """ + from fastapi import APIRouter -from ogscope.web.api.camera.routes import router as camera_router + from ogscope.web.api.alignment.routes import router as alignment_router -from ogscope.web.api.system.routes import router as system_router -from ogscope.web.api.debug.routes import router as debug_router from ogscope.web.api.analysis.routes import router as analysis_router -from ogscope.web.api.catalog.routes import router as catalog_router +from ogscope.web.api.camera.routes import router as camera_router +from ogscope.web.api.debug.routes import router as 
debug_router +from ogscope.web.api.system.routes import router as system_router # 创建主路由器 / Create the main router router = APIRouter() @@ -19,6 +20,3 @@ router.include_router(system_router, tags=["System - 系统"]) router.include_router(debug_router, tags=["Debug - 调试"]) router.include_router(analysis_router, tags=["Analysis - 分析"]) -router.include_router(catalog_router, tags=["Catalog - 星表"]) - - diff --git a/ogscope/web/api/models/schemas.py b/ogscope/web/api/models/schemas.py index fa1fae9..f97428b 100644 --- a/ogscope/web/api/models/schemas.py +++ b/ogscope/web/api/models/schemas.py @@ -1,29 +1,33 @@ """ API 数据模型定义 """ -from pydantic import BaseModel -from typing import Optional, Dict, Any + +from typing import Any, Literal, Optional + +from pydantic import BaseModel, ConfigDict, Field, field_validator class CameraSettings(BaseModel): """相机设置 / camera settings""" + exposure: int # 曝光时间 (微秒) / Exposure time (microseconds) - gain: float # 增益 / Gain + gain: float # 增益 / Gain autoExposure: Optional[bool] = True # 自动曝光开关 / automatic exposure switch digitalGain: Optional[float] = 1.0 # 数字增益 / digital gain - contrast: Optional[float] = 1.0 # 对比度 / Contrast - brightness: Optional[float] = 0.0 # 亮度 / brightness - saturation: Optional[float] = 1.0 # 饱和度 / saturation - sharpness: Optional[float] = 1.0 # 锐度 / sharpness - noiseReduction: Optional[int] = 0 # 降噪级别 (0-4) / Noise reduction level (0-4) - whiteBalanceMode: Optional[str] = 'auto' # 白平衡模式 / white balance mode + contrast: Optional[float] = 1.0 # 对比度 / Contrast + brightness: Optional[float] = 0.0 # 亮度 / brightness + saturation: Optional[float] = 1.0 # 饱和度 / saturation + sharpness: Optional[float] = 1.0 # 锐度 / sharpness + noiseReduction: Optional[int] = 0 # 降噪级别 (0-4) / Noise reduction level (0-4) + whiteBalanceMode: Optional[str] = "auto" # 白平衡模式 / white balance mode whiteBalanceGainR: Optional[float] = 1.0 # 白平衡红色增益 / white balance red gain whiteBalanceGainB: Optional[float] = 1.0 # 白平衡蓝色增益 / white balance blue gain - colorMode: Optional[str] = 'color' # 颜色模式: 'color' | 'mono' + colorMode: Optional[str] = "color" # 颜色模式: 'color' | 'mono' class CameraPreset(BaseModel): """相机预设 / camera presets""" + name: str description: str = "" exposure_us: int @@ -38,16 +42,17 @@ class CameraPreset(BaseModel): sharpness: Optional[float] = 1.0 # 高级参数 / Advanced parameters noise_reduction: Optional[int] = 0 - white_balance_mode: Optional[str] = 'auto' + white_balance_mode: Optional[str] = "auto" white_balance_gain_r: Optional[float] = 1.0 white_balance_gain_b: Optional[float] = 1.0 # 其他参数 / Other parameters rotation: Optional[int] = 180 - color_mode: Optional[str] = 'color' # 颜色模式: 'color' | 'mono' + color_mode: Optional[str] = "color" # 颜色模式: 'color' | 'mono' class CaptureInfo(BaseModel): """拍摄信息 / Shooting information""" + filename: str timestamp: str exposure_us: int @@ -74,6 +79,7 @@ class SystemInfo(BaseModel): class AlignmentStatus(BaseModel): """校准状态 / calibration status""" + status: str azimuth_error: float altitude_error: float @@ -81,34 +87,61 @@ class AlignmentStatus(BaseModel): progress: int -class CatalogDownloadRequest(BaseModel): - """星表下载请求 / Catalog download request""" +class CentroidParamsPayload(BaseModel): + """Tetra3 提星参数覆盖(未填则用环境默认)/ Optional centroid extraction overrides.""" - source: str = "seed" - url: Optional[str] = None - magnitude_limit: float = 8.5 + model_config = ConfigDict(extra="forbid") + sigma: Optional[float] = None + max_area: Optional[int] = None + min_area: Optional[int] = None + filtsize: Optional[int] = None + 
binary_open: Optional[bool] = None + bg_sub_mode: Optional[str] = None + sigma_mode: Optional[str] = None + max_axis_ratio: Optional[float] = None -class CatalogBuildIndexRequest(BaseModel): - """星表索引构建请求 / Catalog build index request""" + @field_validator("filtsize") + @classmethod + def filtsize_must_be_odd(cls, v: Optional[int]) -> Optional[int]: + """滤波边长须为奇数 / Filter size must be odd (Tetra3).""" + if v is None: + return None + if v < 1: + raise ValueError("filtsize must be >= 1") + if v % 2 == 0: + raise ValueError("filtsize must be odd") + return v - magnitude_limit: float = 8.5 - ra_bin_size_deg: float = 15.0 +class AnalysisSolveImageRequest(BaseModel): + """单图解算请求(JSON body)/ Single-image plate solve request.""" -class CatalogStarUpsertRequest(BaseModel): - """星点新增/更新请求 / Catalog star upsert request""" + model_config = ConfigDict(extra="forbid") + + input_name: str + hint_ra_deg: Optional[float] = None + hint_dec_deg: Optional[float] = None + fov_estimate: Optional[float] = None + fov_max_error: Optional[float] = None + solve_timeout_ms: Optional[int] = None + solve_profile: Optional[Literal["speed", "balanced", "robust"]] = None + centroid: Optional[CentroidParamsPayload] = None + max_image_side: Optional[int] = None + large_scale_bg_subtract: Optional[bool] = False + # 结果详细程度:summary 仅返回关键字段,full 包含 tetra 原始块 / Result detail level + detail_level: Optional[Literal["summary", "full"]] = "summary" - source_id: str - ra: float - dec: float - pmra: float = 0.0 - pmdec: float = 0.0 - phot_g_mean_mag: float - name_en: Optional[str] = None - name_zh: Optional[str] = None - description_en: Optional[str] = None - description_zh: Optional[str] = None + +class AnalysisExtractPreviewRequest(BaseModel): + """提星掩膜预览请求 / Centroid extraction preview (binary mask).""" + + model_config = ConfigDict(extra="forbid") + + input_name: str + centroid: Optional[CentroidParamsPayload] = None + max_image_side: Optional[int] = None + large_scale_bg_subtract: Optional[bool] = False class AnalysisJobCreateRequest(BaseModel): @@ -120,6 +153,12 @@ class AnalysisJobCreateRequest(BaseModel): hint_dec_deg: Optional[float] = None frame_step: int = 1 max_frames: int = 180 + fov_estimate: Optional[float] = None + fov_max_error: Optional[float] = None + solve_timeout_ms: Optional[int] = None + centroid: Optional[CentroidParamsPayload] = None + max_image_side: Optional[int] = None + large_scale_bg_subtract: Optional[bool] = False class SolveFrameResult(BaseModel): @@ -128,8 +167,8 @@ class SolveFrameResult(BaseModel): frame_index: int ra_deg: float dec_deg: float - confidence: float solve_source: str + status: str = "" class AnalysisJobStatusResponse(BaseModel): @@ -140,3 +179,88 @@ class AnalysisJobStatusResponse(BaseModel): progress: float message: str = "" result_path: Optional[str] = None + + +class AnalysisSolveParamsOnly(BaseModel): + """解算参数(不含文件名,用于预设与批量)/ Solve params without input filename.""" + + model_config = ConfigDict(extra="forbid") + + hint_ra_deg: Optional[float] = None + hint_dec_deg: Optional[float] = None + fov_estimate: Optional[float] = None + fov_max_error: Optional[float] = None + solve_timeout_ms: Optional[int] = None + solve_profile: Optional[Literal["speed", "balanced", "robust"]] = None + centroid: Optional[CentroidParamsPayload] = None + max_image_side: Optional[int] = None + large_scale_bg_subtract: Optional[bool] = False + detail_level: Optional[Literal["summary", "full"]] = "summary" + + +class BatchSolveRunItem(BaseModel): + """批量解算单轮 / One batch solve run.""" + + label: str + params: 
AnalysisSolveParamsOnly + + +class AnalysisBatchSolveRequest(BaseModel): + """批量解算请求 / Batch plate solve request.""" + + model_config = ConfigDict(extra="forbid") + + input_name: str + runs: list[BatchSolveRunItem] + + +class AnalysisPresetCreate(BaseModel): + """用户预设创建 / User preset create.""" + + model_config = ConfigDict(extra="forbid") + + name: str + params: AnalysisSolveParamsOnly + + +class AnalysisExperimentCreate(BaseModel): + """实验记录保存 / Save experiment record.""" + + model_config = ConfigDict(extra="forbid") + + input_name: str + preset_label: str + result_json: dict[str, Any] + metrics: dict[str, Any] = Field(default_factory=dict) + thumbnail_png_base64: Optional[str] = None + replay: Optional[dict[str, Any]] = None + save_asset_snapshot: bool = True + + +class AnalysisSolveVideoFrameRequest(BaseModel): + """单帧解算:相机 BGR 或素材池视频 seek / Solve one frame from camera or pool video.""" + + model_config = ConfigDict(extra="forbid") + + source: Literal["camera", "file"] + input_name: Optional[str] = None + frame_index: int = 0 + time_sec: Optional[float] = None + hint_ra_deg: Optional[float] = None + hint_dec_deg: Optional[float] = None + fov_estimate: Optional[float] = None + fov_max_error: Optional[float] = None + solve_timeout_ms: Optional[int] = None + solve_profile: Optional[Literal["speed", "balanced", "robust"]] = None + centroid: Optional[CentroidParamsPayload] = None + max_image_side: Optional[int] = None + large_scale_bg_subtract: Optional[bool] = False + detail_level: Optional[Literal["summary", "full"]] = "summary" + + +class ImportFromDebugRequest(BaseModel): + """从调试采集目录导入到分析素材池 / Import capture into analysis pool.""" + + model_config = ConfigDict(extra="forbid") + + filename: str diff --git a/ogscope/web/api/system/routes.py b/ogscope/web/api/system/routes.py index 05aa6a0..cef7897 100644 --- a/ogscope/web/api/system/routes.py +++ b/ogscope/web/api/system/routes.py @@ -1,7 +1,9 @@ """ 系统相关API路由 """ + from fastapi import APIRouter + from ogscope.web.api.models.schemas import SystemInfo from ogscope.web.api.system.services import system_info_service diff --git a/ogscope/web/api/system/services.py b/ogscope/web/api/system/services.py index 14011ce..616260f 100644 --- a/ogscope/web/api/system/services.py +++ b/ogscope/web/api/system/services.py @@ -4,8 +4,8 @@ from __future__ import annotations -import platform import os +import platform import time from pathlib import Path from threading import Lock @@ -169,7 +169,11 @@ def _read_wifi_metrics(self) -> tuple[float | None, float | None, str | None]: link_quality = float(values[0].rstrip(".")) signal_level = float(values[1].rstrip(".")) quality_percent = max(0.0, min(100.0, (link_quality / 70.0) * 100.0)) - return round(quality_percent, 2), round(signal_level, 2), interface.strip() + return ( + round(quality_percent, 2), + round(signal_level, 2), + interface.strip(), + ) except ValueError: continue return None, None, None diff --git a/ogscope/web/app.py b/ogscope/web/app.py index 0f1dc75..12fd1ca 100644 --- a/ogscope/web/app.py +++ b/ogscope/web/app.py @@ -1,16 +1,18 @@ """ FastAPI Web 应用 """ + +import asyncio +from collections.abc import AsyncGenerator from contextlib import asynccontextmanager from pathlib import Path -from typing import AsyncGenerator from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.openapi.docs import get_redoc_html +from fastapi.responses import FileResponse, HTMLResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import 
Jinja2Templates -from fastapi.responses import HTMLResponse from loguru import logger from ogscope.__version__ import __version__ @@ -23,17 +25,54 @@ async def lifespan(app: FastAPI) -> AsyncGenerator: """应用生命周期管理 / Application life cycle management""" # 启动时执行 / Execute at startup logger.info("初始化 Web 应用...") - - # TODO: 初始化数据库连接 / TODO: Initialize database connection - # TODO: 初始化相机 / TODO: Initialize camera - # TODO: 初始化其他资源 / TODO: Initialize other resources - + + # 真实硬件下后台预热相机:不阻塞 Uvicorn 就绪;首次请求仍可在锁内完成初始化 + # Background warm-up on real hardware: do not block server readiness; first request can still init under lock. + async def _warm_camera() -> None: + try: + from ogscope.utils.environment import should_use_simulation_mode + + if not should_use_simulation_mode(): + from ogscope.web.camera_shared import get_camera_manager + + await get_camera_manager().ensure_started() + logger.info( + "相机已启动并进入共享预览缓存 / Camera streaming (shared preview cache)" + ) + except Exception as e: + logger.warning( + f"启动时相机预热失败,将在首次请求时重试 / Camera warm-up failed, retry on demand: {e}" + ) + + async def _warm_solver() -> None: + try: + from ogscope.algorithms.plate_solve.solver import warmup_tetra3 + + await asyncio.to_thread(warmup_tetra3) + logger.info("解算器已预热 / Plate solver warmed up") + except Exception as e: + logger.warning( + f"启动时解算器预热失败,将在首次解算时重试 / Solver warm-up failed, retry on first solve: {e}" + ) + + asyncio.create_task(_warm_camera()) + # 解算器预热改为启动阶段阻塞完成,避免首个解算请求与后台预热竞态导致“第一次明显变慢” + # Warm the solver synchronously during startup to avoid first-request cold-start race. + await _warm_solver() + yield - + # 关闭时执行 / Execute on shutdown logger.info("清理资源...") - # TODO: 关闭数据库连接 / TODO: Close database connection - # TODO: 释放相机资源 / TODO: Release camera resources + try: + from ogscope.utils.environment import should_use_simulation_mode + + if not should_use_simulation_mode(): + from ogscope.web.camera_shared import get_camera_manager + + await get_camera_manager().stop() + except Exception as e: + logger.warning(f"关闭相机失败 / Failed to stop camera on shutdown: {e}") # API 文档分组标签 / API documentation group tags @@ -86,10 +125,13 @@ def _asset_stamp(path: Path) -> int: except Exception: return 0 + # 配置 CORS (允许跨域请求) / Configure CORS (allow cross-origin requests) app.add_middleware( CORSMiddleware, - allow_origins=["*"], # 生产环境应该限制具体域名 / Production environments should restrict specific domain names + allow_origins=[ + "*" + ], # 生产环境应该限制具体域名 / Production environments should restrict specific domain names allow_credentials=True, allow_methods=["*"], allow_headers=["*"], @@ -102,7 +144,10 @@ def _asset_stamp(path: Path) -> int: # 挂载Web模板和manifest / Mount web templates and manifests if settings.template_dir.exists(): from fastapi.staticfiles import StaticFiles - app.mount("/web", StaticFiles(directory=str(settings.template_dir.parent)), name="web") + + app.mount( + "/web", StaticFiles(directory=str(settings.template_dir.parent)), name="web" + ) # 注册路由 / Register route app.include_router(api_router, prefix="/api") @@ -112,12 +157,8 @@ def _asset_stamp(path: Path) -> int: async def root(request: Request): """根路径 - 返回主页面 / Root path - return to main page""" return templates.TemplateResponse( - "index.html", - { - "request": request, - "version": __version__, - "app_name": "OGScope" - } + "index.html", + {"request": request, "version": __version__, "app_name": "OGScope"}, ) @@ -126,28 +167,36 @@ async def debug_console(request: Request): """调试控制台页面 / Debug console page""" debug_js_path = settings.static_dir / "js" / 
"debug.js" return templates.TemplateResponse( - "debug.html", + "debug.html", { "request": request, "version": __version__, "app_name": "OGScope Debug Console", "debug_assets_version": _asset_stamp(debug_js_path), - } + }, ) @app.get("/debug/analysis", response_class=HTMLResponse) async def debug_analysis_console(request: Request): - """星图解算调试页面 / Plate solve debug page""" + """星空解算控制台(Vite 构建 SPA)或回退旧模板 / Plate solve console SPA or legacy template.""" + lab_index = settings.static_dir / "analysis-lab" / "index.html" + if lab_index.is_file(): + return FileResponse(lab_index) + da_js = settings.static_dir / "js" / "debug-analysis.js" + da_css = settings.static_dir / "css" / "debug-analysis.css" + debug_analysis_assets_version = f"{_asset_stamp(da_js)}-{_asset_stamp(da_css)}" return templates.TemplateResponse( "debug_analysis.html", { "request": request, "version": __version__, "app_name": "OGScope Plate Solve Debug Console", + "debug_analysis_assets_version": debug_analysis_assets_version, }, ) + @app.get("/api") async def api_root(): """API根路径 / API root path""" @@ -161,8 +210,7 @@ async def api_root(): "alignment": "/api/alignment/", "system": "/api/system/", "analysis": "/api/analysis/", - "catalog": "/api/catalog/", - } + }, } @@ -183,4 +231,3 @@ async def health_check(): "status": "healthy", "version": __version__, } - diff --git a/ogscope/web/camera_shared.py b/ogscope/web/camera_shared.py new file mode 100644 index 0000000..a5d0ea2 --- /dev/null +++ b/ogscope/web/camera_shared.py @@ -0,0 +1,336 @@ +""" +统一相机管理与共享帧总线 / Unified camera manager and shared frame bus. +""" + +from __future__ import annotations + +import asyncio +import logging +import os +import time +from dataclasses import dataclass +from threading import Lock +from typing import Any, Callable + + +@dataclass(slots=True) +class SharedFrame: + """共享帧快照 / Shared frame snapshot.""" + + frame_id: int + timestamp: float + raw_frame: Any | None + jpeg_frame: bytes | None + width: int + height: int + + +class CameraManager: + """全局单相机控制器(控制面+数据面)/ Global single-camera controller.""" + + def __init__(self) -> None: + self._camera = None + self._control_lock = asyncio.Lock() + self._read_lock = Lock() + self._frame_lock = Lock() + self._grabber_task: asyncio.Task | None = None + self._frame_id = 0 + self._latest_raw = None + self._latest_jpeg: bytes | None = None + self._latest_ts = 0.0 + self._latest_w = 0 + self._latest_h = 0 + self._runtime_overrides: dict[str, Any] = {} + self._jpeg_quality = int(os.getenv("OGSCOPE_PREVIEW_JPEG_QUALITY", "75")) + self._target_fps = max(1, int(os.getenv("OGSCOPE_SHARED_PREVIEW_FPS", "8"))) + self._logger = logging.getLogger(__name__) + + def _build_base_config(self) -> dict[str, Any]: + from ogscope.config import get_settings + + settings = get_settings() + base = { + "type": "imx327_mipi", + "width": settings.camera_width, + "height": settings.camera_height, + "fps": max(1, int(getattr(settings, "camera_fps", 5) or 5)), + "exposure_us": settings.camera_exposure, + "analogue_gain": settings.camera_gain, + "auto_exposure": True, + "rotation": 180, + "sampling_mode": getattr(settings, "camera_sampling_mode", "native"), + "noise_reduction": 0, + "white_balance_mode": "auto", + "white_balance_gain_r": 1.0, + "white_balance_gain_b": 1.0, + "contrast": 1.0, + "brightness": 0.0, + "saturation": 1.0, + "sharpness": 1.0, + "night_mode": False, + "color_mode": "color", + } + return {**base, **self._runtime_overrides} + + def _create_camera_sync(self): + from ogscope.hardware.camera import create_camera 
+ + config = self._build_base_config() + camera = create_camera(config) + if camera and camera.initialize(): + return camera + return None + + def _encode_preview_jpeg_sync(self, frame) -> bytes | None: + try: + import cv2 + + ok, buf = cv2.imencode( + ".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, int(self._jpeg_quality)] + ) + if not ok: + return None + return buf.tobytes() + except Exception: + return None + + def _read_frame_sync(self): + with self._read_lock: + if self._camera is None or not getattr(self._camera, "is_capturing", False): + return None + return self._camera.get_video_frame() + + async def ensure_started(self) -> None: + """确保单相机进入采集并启动共享帧抓取 / Ensure capture and shared frame grabber.""" + async with self._control_lock: + if self._camera is None: + self._camera = await asyncio.to_thread(self._create_camera_sync) + if self._camera is None: + raise RuntimeError("相机初始化失败 / Camera init failed") + if not getattr(self._camera, "is_capturing", False): + ok = await asyncio.to_thread(self._camera.start_capture) + if not ok: + raise RuntimeError("相机启动失败 / Camera start failed") + await self._ensure_grabber_locked() + + async def stop(self) -> None: + """停止相机采集 / Stop camera capture.""" + async with self._control_lock: + await self._stop_grabber_locked() + if self._camera is not None and getattr( + self._camera, "is_capturing", False + ): + await asyncio.to_thread(self._camera.stop_capture) + + async def pause_grabber(self) -> None: + """暂停共享抓帧任务(保留采集)/ Pause shared frame grabber only.""" + async with self._control_lock: + await self._stop_grabber_locked() + + async def resume_grabber(self) -> None: + """恢复共享抓帧任务 / Resume shared frame grabber.""" + async with self._control_lock: + if self._camera is not None and getattr( + self._camera, "is_capturing", False + ): + await self._ensure_grabber_locked() + + def _call_with_read_lock(self, fn: Callable[[], Any]) -> Any: + """在读锁下执行阻塞操作,避免与抓帧并发 / Run blocking op under read lock.""" + with self._read_lock: + return fn() + + async def reconfigure_camera( + self, + operation_name: str, + fn: Callable[[], Any], + *, + timeout_sec: float = 10.0, + ) -> Any: + """受控重配置:同一临界区内停抓帧->改参->恢复 / Controlled reconfigure.""" + async with self._control_lock: + t0 = time.time() + await self._stop_grabber_locked() + try: + result = await asyncio.wait_for( + asyncio.to_thread(self._call_with_read_lock, fn), + timeout=timeout_sec, + ) + return result + finally: + if self._camera is not None and getattr( + self._camera, "is_capturing", False + ): + await self._ensure_grabber_locked() + self._logger.info( + "camera_reconfigure_done op=%s cost_ms=%.2f", + operation_name, + (time.time() - t0) * 1000.0, + ) + + async def _ensure_grabber_locked(self) -> None: + if self._grabber_task and not self._grabber_task.done(): + return + self._grabber_task = asyncio.create_task(self._grabber_loop()) + + async def _stop_grabber_locked(self) -> None: + if not self._grabber_task: + return + self._grabber_task.cancel() + try: + await asyncio.wait_for(self._grabber_task, timeout=2.0) + except asyncio.CancelledError: + # 抓帧任务被取消属于正常停止流程,不应向上抛出 + # Task cancellation is expected during graceful stop. 
+ pass + except Exception: + pass + self._grabber_task = None + + async def _grabber_loop(self) -> None: + interval = 1.0 / float(self._target_fps) + loop = asyncio.get_running_loop() + try: + while True: + t0 = time.time() + try: + frame = await asyncio.to_thread(self._read_frame_sync) + if frame is not None: + jpeg = await loop.run_in_executor( + None, self._encode_preview_jpeg_sync, frame + ) + h = int(getattr(frame, "shape", [0, 0])[0] or 0) + w = int(getattr(frame, "shape", [0, 0])[1] or 0) + with self._frame_lock: + self._frame_id += 1 + self._latest_raw = frame + self._latest_jpeg = jpeg + self._latest_ts = time.time() + self._latest_w = w + self._latest_h = h + except Exception as e: + self._logger.error(f"共享抓帧循环异常 / Shared grabber error: {e}") + spent = time.time() - t0 + await asyncio.sleep(max(0.0, interval - spent)) + except asyncio.CancelledError: + raise + + def get_camera_instance(self): + """兼容接口:返回全局相机实例 / Compat accessor for global camera object.""" + return self._camera + + def ensure_camera_instance_sync(self): + """兼容旧接口:仅返回当前实例,不再在锁外触发初始化 / Compat: return existing camera only.""" + return self._camera + + def attach_camera_instance(self, camera: Any) -> None: + """注入现有相机实例(测试/兼容)/ Attach existing camera instance (tests/compat).""" + self._camera = camera + + async def status(self) -> dict[str, Any]: + cam = self._camera + if cam is None: + return { + "connected": False, + "streaming": False, + "runtime_overrides": self._runtime_overrides, + } + info = await asyncio.to_thread(cam.get_camera_info) + return { + "connected": bool(getattr(cam, "is_initialized", False)), + "streaming": bool(getattr(cam, "is_capturing", False)), + "info": info, + "runtime_overrides": self._runtime_overrides, + } + + async def get_preview_frame( + self, since_id: int | None = None, wait_timeout_sec: float = 0.8 + ) -> tuple[int, SharedFrame | None]: + """读取预览帧;如未更新则返回 304 / Get preview frame; return 304 if unchanged.""" + await self.ensure_started() + deadline = time.time() + max(0.0, float(wait_timeout_sec)) + while True: + with self._frame_lock: + if self._frame_id > 0 and self._latest_jpeg is not None: + if since_id is not None and since_id == self._frame_id: + return 304, None + snap = SharedFrame( + frame_id=self._frame_id, + timestamp=self._latest_ts, + raw_frame=self._latest_raw, + jpeg_frame=self._latest_jpeg, + width=self._latest_w, + height=self._latest_h, + ) + return 200, snap + if time.time() >= deadline: + return 503, None + await asyncio.sleep(0.02) + + async def get_raw_frame(self) -> tuple[Any, int, float]: + """读取分析帧 / Get frame for analysis.""" + await self.ensure_started() + with self._frame_lock: + if self._latest_raw is None: + raise RuntimeError("无可用视频帧 / No frame available") + try: + frame = self._latest_raw.copy() + except Exception: + frame = self._latest_raw + return frame, self._frame_id, self._latest_ts + + async def get_cached_frame_snapshot(self) -> SharedFrame | None: + """读取当前缓存帧快照(不触发 ensure)/ Read cached snapshot without ensure.""" + with self._frame_lock: + if self._frame_id <= 0: + return None + return SharedFrame( + frame_id=self._frame_id, + timestamp=self._latest_ts, + raw_frame=self._latest_raw, + jpeg_frame=self._latest_jpeg, + width=self._latest_w, + height=self._latest_h, + ) + + @staticmethod + def encode_frame( + raw_frame: Any, image_format: str = "jpeg", quality: int = 75 + ) -> bytes | None: + """将原始帧编码为图像字节 / Encode raw frame to image bytes.""" + try: + import cv2 + + if image_format.lower() == "png": + ok, buf = cv2.imencode(".png", 
raw_frame) + else: + ok, buf = cv2.imencode( + ".jpg", + raw_frame, + [cv2.IMWRITE_JPEG_QUALITY, int(max(10, min(100, quality)))], + ) + if not ok: + return None + return buf.tobytes() + except Exception: + return None + + def update_runtime_overrides(self, updates: dict[str, Any]) -> None: + """更新运行时覆盖参数(不落盘)/ Update runtime overrides (memory only).""" + self._runtime_overrides.update(updates) + + def get_runtime_overrides(self) -> dict[str, Any]: + """读取运行时覆盖参数 / Read runtime overrides.""" + return dict(self._runtime_overrides) + + def clear_runtime_overrides(self) -> None: + """清空运行时覆盖参数 / Clear runtime overrides.""" + self._runtime_overrides.clear() + + +_camera_manager = CameraManager() + + +def get_camera_manager() -> CameraManager: + """获取全局相机管理器 / Get global camera manager.""" + return _camera_manager diff --git a/poetry.lock b/poetry.lock index ed35162..ae7f56e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -466,6 +466,8 @@ files = [ {file = "greenlet-3.2.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2ca18a03a8cfb5b25bc1cbe20f3d9a4c80d8c3b13ba3df49ac3961af0b1018d"}, {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9fe0a28a7b952a21e2c062cd5756d34354117796c6d9215a87f55e38d15402c5"}, {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8854167e06950ca75b898b104b63cc646573aa5fef1353d4508ecdd1ee76254f"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f47617f698838ba98f4ff4189aef02e7343952df3a615f847bb575c3feb177a7"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af41be48a4f60429d5cad9d22175217805098a9ef7c40bfef44f7669fb9d74d8"}, {file = "greenlet-3.2.4-cp310-cp310-win_amd64.whl", hash = "sha256:73f49b5368b5359d04e18d15828eecc1806033db5233397748f4ca813ff1056c"}, {file = "greenlet-3.2.4-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:96378df1de302bc38e99c3a9aa311967b7dc80ced1dcc6f171e99842987882a2"}, {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1ee8fae0519a337f2329cb78bd7a8e128ec0f881073d43f023c7b8d4831d5246"}, @@ -475,6 +477,8 @@ files = [ {file = "greenlet-3.2.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2523e5246274f54fdadbce8494458a2ebdcdbc7b802318466ac5606d3cded1f8"}, {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1987de92fec508535687fb807a5cea1560f6196285a4cde35c100b8cd632cc52"}, {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:55e9c5affaa6775e2c6b67659f3a71684de4c549b3dd9afca3bc773533d284fa"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9c6de1940a7d828635fbd254d69db79e54619f165ee7ce32fda763a9cb6a58c"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03c5136e7be905045160b1b9fdca93dd6727b180feeafda6818e6496434ed8c5"}, {file = "greenlet-3.2.4-cp311-cp311-win_amd64.whl", hash = "sha256:9c40adce87eaa9ddb593ccb0fa6a07caf34015a29bf8d344811665b573138db9"}, {file = "greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd"}, {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb"}, @@ -484,6 +488,8 @@ files = [ {file = "greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0"}, {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0"}, {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d"}, {file = "greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02"}, {file = "greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31"}, {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945"}, @@ -493,6 +499,8 @@ files = [ {file = "greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671"}, {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b"}, {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929"}, {file = "greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b"}, {file = "greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0"}, {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f"}, @@ -500,6 +508,8 @@ files = [ {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1"}, {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735"}, {file = "greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337"}, + {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269"}, + {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:015d48959d4add5d6c9f6c5210ee3803a830dce46356e3bc326d6776bde54681"}, {file = "greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01"}, {file = "greenlet-3.2.4-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:b6a7c19cf0d2742d0809a4c05975db036fdff50cd294a93632d6a310bf9ac02c"}, {file = 
"greenlet-3.2.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:27890167f55d2387576d1f41d9487ef171849ea0359ce1510ca6e06c8bece11d"}, @@ -509,6 +519,8 @@ files = [ {file = "greenlet-3.2.4-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9913f1a30e4526f432991f89ae263459b1c64d1608c0d22a5c79c287b3c70df"}, {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b90654e092f928f110e0007f572007c9727b5265f7632c2fa7415b4689351594"}, {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:81701fd84f26330f0d5f4944d4e92e61afe6319dcd9775e39396e39d7c3e5f98"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:28a3c6b7cd72a96f61b0e4b2a36f681025b60ae4779cc73c1535eb5f29560b10"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:52206cd642670b0b320a1fd1cbfd95bca0e043179c1d8a045f2c6109dfe973be"}, {file = "greenlet-3.2.4-cp39-cp39-win32.whl", hash = "sha256:65458b409c1ed459ea899e939f0e1cdb14f58dbc803f2f93c5eab5694d32671b"}, {file = "greenlet-3.2.4-cp39-cp39-win_amd64.whl", hash = "sha256:d2e685ade4dafd447ede19c31277a224a239a0a1a4eca4e6390efedf20260cfb"}, {file = "greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d"}, @@ -1807,6 +1819,70 @@ files = [ {file = "ruff-0.2.2.tar.gz", hash = "sha256:e62ed7f36b3068a30ba39193a14274cd706bc486fad521276458022f7bccb31d"}, ] +[[package]] +name = "scipy" +version = "1.15.3" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "scipy-1.15.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:a345928c86d535060c9c2b25e71e87c39ab2f22fc96e9636bd74d1dbf9de448c"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:ad3432cb0f9ed87477a8d97f03b763fd1d57709f1bbde3c9369b1dff5503b253"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:aef683a9ae6eb00728a542b796f52a5477b78252edede72b8327a886ab63293f"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:1c832e1bd78dea67d5c16f786681b28dd695a8cb1fb90af2e27580d3d0967e92"}, + {file = "scipy-1.15.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:263961f658ce2165bbd7b99fa5135195c3a12d9bef045345016b8b50c315cb82"}, + {file = "scipy-1.15.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2abc762b0811e09a0d3258abee2d98e0c703eee49464ce0069590846f31d40"}, + {file = "scipy-1.15.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ed7284b21a7a0c8f1b6e5977ac05396c0d008b89e05498c8b7e8f4a1423bba0e"}, + {file = "scipy-1.15.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5380741e53df2c566f4d234b100a484b420af85deb39ea35a1cc1be84ff53a5c"}, + {file = "scipy-1.15.3-cp310-cp310-win_amd64.whl", hash = "sha256:9d61e97b186a57350f6d6fd72640f9e99d5a4a2b8fbf4b9ee9a841eab327dc13"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:993439ce220d25e3696d1b23b233dd010169b62f6456488567e830654ee37a6b"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:34716e281f181a02341ddeaad584205bd2fd3c242063bd3423d61ac259ca7eba"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3b0334816afb8b91dab859281b1b9786934392aa3d527cd847e41bb6f45bee65"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = 
"sha256:6db907c7368e3092e24919b5e31c76998b0ce1684d51a90943cb0ed1b4ffd6c1"}, + {file = "scipy-1.15.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:721d6b4ef5dc82ca8968c25b111e307083d7ca9091bc38163fb89243e85e3889"}, + {file = "scipy-1.15.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39cb9c62e471b1bb3750066ecc3a3f3052b37751c7c3dfd0fd7e48900ed52982"}, + {file = "scipy-1.15.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:795c46999bae845966368a3c013e0e00947932d68e235702b5c3f6ea799aa8c9"}, + {file = "scipy-1.15.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:18aaacb735ab38b38db42cb01f6b92a2d0d4b6aabefeb07f02849e47f8fb3594"}, + {file = "scipy-1.15.3-cp311-cp311-win_amd64.whl", hash = "sha256:ae48a786a28412d744c62fd7816a4118ef97e5be0bee968ce8f0a2fba7acf3bb"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6ac6310fdbfb7aa6612408bd2f07295bcbd3fda00d2d702178434751fe48e019"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:185cd3d6d05ca4b44a8f1595af87f9c372bb6acf9c808e99aa3e9aa03bd98cf6"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:05dc6abcd105e1a29f95eada46d4a3f251743cfd7d3ae8ddb4088047f24ea477"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:06efcba926324df1696931a57a176c80848ccd67ce6ad020c810736bfd58eb1c"}, + {file = "scipy-1.15.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05045d8b9bfd807ee1b9f38761993297b10b245f012b11b13b91ba8945f7e45"}, + {file = "scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271e3713e645149ea5ea3e97b57fdab61ce61333f97cfae392c28ba786f9bb49"}, + {file = "scipy-1.15.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6cfd56fc1a8e53f6e89ba3a7a7251f7396412d655bca2aa5611c8ec9a6784a1e"}, + {file = "scipy-1.15.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ff17c0bb1cb32952c09217d8d1eed9b53d1463e5f1dd6052c7857f83127d539"}, + {file = "scipy-1.15.3-cp312-cp312-win_amd64.whl", hash = "sha256:52092bc0472cfd17df49ff17e70624345efece4e1a12b23783a1ac59a1b728ed"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c620736bcc334782e24d173c0fdbb7590a0a436d2fdf39310a8902505008759"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:7e11270a000969409d37ed399585ee530b9ef6aa99d50c019de4cb01e8e54e62"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8c9ed3ba2c8a2ce098163a9bdb26f891746d02136995df25227a20e71c396ebb"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0bdd905264c0c9cfa74a4772cdb2070171790381a5c4d312c973382fc6eaf730"}, + {file = "scipy-1.15.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79167bba085c31f38603e11a267d862957cbb3ce018d8b38f79ac043bc92d825"}, + {file = "scipy-1.15.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9deabd6d547aee2c9a81dee6cc96c6d7e9a9b1953f74850c179f91fdc729cb7"}, + {file = "scipy-1.15.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dde4fc32993071ac0c7dd2d82569e544f0bdaff66269cb475e0f369adad13f11"}, + {file = "scipy-1.15.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f77f853d584e72e874d87357ad70f44b437331507d1c311457bed8ed2b956126"}, + {file = "scipy-1.15.3-cp313-cp313-win_amd64.whl", hash = "sha256:b90ab29d0c37ec9bf55424c064312930ca5f4bde15ee8619ee44e69319aab163"}, + {file = 
"scipy-1.15.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3ac07623267feb3ae308487c260ac684b32ea35fd81e12845039952f558047b8"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6487aa99c2a3d509a5227d9a5e889ff05830a06b2ce08ec30df6d79db5fcd5c5"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:50f9e62461c95d933d5c5ef4a1f2ebf9a2b4e83b0db374cb3f1de104d935922e"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:14ed70039d182f411ffc74789a16df3835e05dc469b898233a245cdfd7f162cb"}, + {file = "scipy-1.15.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a769105537aa07a69468a0eefcd121be52006db61cdd8cac8a0e68980bbb723"}, + {file = "scipy-1.15.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db984639887e3dffb3928d118145ffe40eff2fa40cb241a306ec57c219ebbbb"}, + {file = "scipy-1.15.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:40e54d5c7e7ebf1aa596c374c49fa3135f04648a0caabcb66c52884b943f02b4"}, + {file = "scipy-1.15.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5e721fed53187e71d0ccf382b6bf977644c533e506c4d33c3fb24de89f5c3ed5"}, + {file = "scipy-1.15.3-cp313-cp313t-win_amd64.whl", hash = "sha256:76ad1fb5f8752eabf0fa02e4cc0336b4e8f021e2d5f061ed37d6d264db35e3ca"}, + {file = "scipy-1.15.3.tar.gz", hash = "sha256:eae3cf522bc7df64b42cad3925c876e1b0b6c35c1337c93e12c0f366f55b0eaf"}, +] + +[package.dependencies] +numpy = ">=1.23.5,<2.5" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.0.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + [[package]] name = "sgp4" version = "2.25" @@ -2499,4 +2575,4 @@ dev = ["black (>=19.3b0) ; python_version >= \"3.6\"", "pytest (>=4.6.2)"] [metadata] lock-version = "2.1" python-versions = "^3.10" -content-hash = "da23376fd885525228759d85ead0736c9f756a19b2011bc1d55e6346eb97ef53" +content-hash = "2f2492fd75140682277386e834480a190c21a6919e5087722cedf5d8982d0a67" diff --git a/pyproject.toml b/pyproject.toml index 1a5ead9..b42f959 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,6 +26,7 @@ aiofiles = "^23.2.1" # 异步文件操作 numpy = ">=2,<3" opencv-python-headless = ">=4.12,<5" pillow = ">=10,<13" +scipy = ">=1.10,<1.17" # 相机支持 (Raspberry Pi MIPI) # picamera2 = "^0.3.0" # 树莓派 MIPI 相机支持 (仅Linux) @@ -94,7 +95,11 @@ build-backend = "poetry.core.masonry.api" [tool.ruff] line-length = 88 +# 保持 py39 以匹配既有代码风格,避免 UP007 等大规模改写 / Match legacy style; py310 enables many UP* rewrites target-version = "py39" +exclude = ["ogscope/vendor"] + +[tool.ruff.lint] select = [ "E", # pycodestyle errors "W", # pycodestyle warnings @@ -107,15 +112,17 @@ select = [ ignore = [ "E501", # line too long (handled by black) "B008", # do not perform function calls in argument defaults + "B904", # raise from inside except (HTTPException 风格常见 / common in API handlers) "C901", # too complex ] -[tool.ruff.per-file-ignores] 
+[tool.ruff.lint.per-file-ignores] "__init__.py" = ["F401"] # 允许未使用的导入 +"ogscope/__init__.py" = ["E402"] # vendor 路径需在 import 前注入 / vendor path before imports [tool.black] line-length = 88 -target-version = ['py39'] +target-version = ['py310'] include = '\.pyi?$' extend-exclude = ''' /( @@ -128,11 +135,12 @@ extend-exclude = ''' | \.venv | build | dist + | ogscope/vendor )/ ''' [tool.mypy] -python_version = "3.9" +python_version = "3.10" warn_return_any = true warn_unused_configs = true disallow_untyped_defs = false # 逐步启用 diff --git a/scripts/board-update.sh b/scripts/board-update.sh new file mode 100755 index 0000000..f013b88 --- /dev/null +++ b/scripts/board-update.sh @@ -0,0 +1,96 @@ +#!/bin/bash +# OGScope 开发板增量更新 / Board incremental update (after first install) +# +# 环境变量 / Environment: +# OGSCOPE_GIT_PULL=1 — 在更新前执行 git pull(需 git 仓库)/ Run git pull before update (requires .git) +# OGSCOPE_INSTALL_DEV=1 — poetry install 时包含 dev 依赖 / Include dev dependency group +# POETRY_INSTALLER_MAX_WORKERS — 默认 2,低配板可设为 1 / Default 2; set to 1 on low-RAM boards +# OGSCOPE_MIRROR=auto|cn|international — 与 install.sh 相同 / Same as install.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)" +SERVICE_NAME="ogscope" + +cd "${PROJECT_DIR}" + +# 加载镜像逻辑 / Load mirror helpers +# shellcheck source=mirror.sh +source "${SCRIPT_DIR}/mirror.sh" +OGSCOPE_MIRROR_RESOLVED="$(ogscope_resolve_mirror)" +echo "🌐 镜像模式 / Mirror: ${OGSCOPE_MIRROR_RESOLVED}(OGSCOPE_MIRROR=${OGSCOPE_MIRROR:-auto})" + +if [ ! -f "${PROJECT_DIR}/pyproject.toml" ]; then + echo "❌ 未找到 pyproject.toml / pyproject.toml not found" + exit 1 +fi + +export PATH="${HOME}/.local/bin:${PATH}" +if ! command -v poetry >/dev/null 2>&1; then + echo "❌ 未找到 Poetry,请先运行 ./scripts/install.sh / Poetry not found; run ./scripts/install.sh first" + exit 1 +fi + +if [ "${OGSCOPE_GIT_PULL:-}" = "1" ]; then + if [ -d "${PROJECT_DIR}/.git" ]; then + echo "📥 git pull..." + git pull --ff-only + else + echo "⚠️ 非 git 仓库,跳过 git pull / Not a git repo; skipping git pull" + fi +fi + +# 与 install.sh 保持一致,避免 PEP 668 / Match install.sh; avoid PEP 668 issues +poetry config virtualenvs.create true +poetry config virtualenvs.in-project true +poetry config virtualenvs.options.system-site-packages true 2>/dev/null || true + +INSTALL_ARGS=(install --no-interaction) +if [ "${OGSCOPE_INSTALL_DEV:-}" = "1" ]; then + echo "📦 poetry install(含 dev / with dev)..." +else + INSTALL_ARGS+=(--only main) + echo "📦 poetry install --only main..." +fi + +export POETRY_INSTALLER_MAX_WORKERS="${POETRY_INSTALLER_MAX_WORKERS:-2}" + +if [ "${OGSCOPE_MIRROR_RESOLVED}" = "cn" ]; then + ogscope_export_pypi_mirror_cn +else + ogscope_export_pypi_mirror_international +fi + +poetry "${INSTALL_ARGS[@]}" + +# numpy/scipy 与 lock 一致;Poetry 偶发「无更新」但 wheel 未落盘 / Align deps with lock; retry if missing +if ! ogscope_verify_numpy_scipy; then + echo "⚠️ numpy/scipy 导入失败,使用 --no-cache 重试 poetry install / Import failed; retrying poetry with --no-cache" + poetry "${INSTALL_ARGS[@]}" --no-cache +fi +if ! ogscope_verify_numpy_scipy; then + echo "⚠️ 仍缺少 scipy,使用 pip 补装(与 pyproject 版本约束一致)/ scipy still missing; pip install (same constraints)" + poetry run pip install --no-cache-dir "scipy>=1.10,<1.17" +fi +if ! ogscope_verify_numpy_scipy; then + echo "❌ numpy/scipy 仍不可用。请删除 .venv 后重试: rm -rf .venv && OGSCOPE_MIRROR=cn ./scripts/board-update.sh" + echo "❌ Still failing. 
Try: rm -rf .venv && ./scripts/board-update.sh" + exit 1 +fi +echo "✅ numpy/scipy 已就绪 / numpy & scipy OK" + +VENV_PYTHON="$(poetry env info --path)/bin/python" +SERVICE_PATH="/etc/systemd/system/${SERVICE_NAME}.service" +ogscope_sync_systemd_execstart_if_needed "${SERVICE_PATH}" "${VENV_PYTHON}" + +echo "🔄 重启服务 / Restarting service..." +sudo systemctl daemon-reload +sudo systemctl restart "${SERVICE_NAME}" + +sleep 2 +sudo systemctl --no-pager status "${SERVICE_NAME}" || true + +echo "" +echo "✅ 更新完成 / Update done. 日志 / Logs: sudo journalctl -u ${SERVICE_NAME} -f" +echo "健康检查 / Health: curl -s http://127.0.0.1:8000/health" diff --git a/scripts/install.sh b/scripts/install.sh index 6b37561..2e15f5a 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -1,41 +1,71 @@ #!/bin/bash # OGScope 安装脚本 / OGScope installation script -# 适用于 Raspberry Pi / For Raspberry Pi +# 适用于 Raspberry Pi / Orange Pi 等嵌入式板 / For Raspberry Pi, Orange Pi, etc. +# +# 环境变量 / Environment: +# OGSCOPE_INSTALL_DEV=1 — 安装含 dev 依赖(开发机);默认仅 main / Install dev deps; default main only +# OGSCOPE_APT_SLOW=1 — 分批安装 apt 包并在批次间暂停,减轻低配板内存压力 / Stagger apt for low-memory boards +# OGSCOPE_MIRROR=auto|cn|international — 软件源:auto 按语言/时区启发;中国大陆建议 cn 或保持 auto / Mirrors for CN vs abroad +# OGSCOPE_POETRY_INSTALLER_URL — 可选,覆盖 Poetry 引导脚本 URL(国内可自建镜像)/ Optional Poetry bootstrap URL mirror set -euo pipefail echo "======================================" -echo " OGScope 安装脚本" +echo " OGScope 安装脚本 / OGScope installation script" echo "======================================" -# 检查是否为 root / Check if it is root if [ "${EUID}" -eq 0 ]; then - echo "❌ 请不要使用 root 用户运行此脚本" + echo "❌ 请不要使用 root 用户运行此脚本 / Do not run as root" exit 1 fi -# 基本路径 / base path SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" PROJECT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)" SERVICE_NAME="ogscope" SERVICE_PATH="/etc/systemd/system/${SERVICE_NAME}.service" -echo "📁 项目目录: ${PROJECT_DIR}" +echo "📁 项目目录 / Project: ${PROJECT_DIR}" -# 检查项目结构 / Check project structure if [ ! -f "${PROJECT_DIR}/pyproject.toml" ]; then - echo "❌ 未找到 pyproject.toml,请在项目目录中执行此脚本" + echo "❌ 未找到 pyproject.toml / pyproject.toml not found" exit 1 fi cd "${PROJECT_DIR}" -# 更新系统 / Update system -echo "📦 更新系统包..." +# 加载镜像逻辑(apt / PyPI)/ Load mirror helpers for apt and PyPI +# shellcheck source=mirror.sh +source "${SCRIPT_DIR}/mirror.sh" + +# 识别发行版并要求 Debian 系 + apt,避免误操作 / Detect OS; require Debian family + apt for safety +if ! ogscope_load_os_release; then + exit 1 +fi +ogscope_print_os_summary +if ! ogscope_require_debian_family_apt; then + exit 1 +fi + +OGSCOPE_MIRROR_RESOLVED="$(ogscope_resolve_mirror)" +echo "🌐 镜像模式 / Mirror: ${OGSCOPE_MIRROR_RESOLVED}(OGSCOPE_MIRROR=${OGSCOPE_MIRROR:-auto})" + +if [ "${OGSCOPE_MIRROR_RESOLVED}" = "cn" ]; then + ogscope_apply_apt_mirror_cn +fi + +# 低配板可选在 apt 批次间暂停 / Optional pause between apt batches on low-RAM boards +_apt_pause() { + if [ "${OGSCOPE_APT_SLOW:-}" = "1" ]; then + echo "⏳ 等待 3s 释放内存... / Waiting to free memory..." + sleep 3 + fi +} + +echo "📦 apt update..." sudo apt update +_apt_pause -# 安装系统依赖 / Install system dependencies -echo "📦 安装系统依赖..." +echo "📦 安装基础系统包 / Installing base packages..." sudo apt install -y \ python3 \ python3-pip \ @@ -43,68 +73,128 @@ sudo apt install -y \ python3-dev \ git \ curl \ - build-essential \ + build-essential +_apt_pause + +echo "📦 安装图像与开发库 / Installing image and dev libraries..." 
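+# 说明(假设):以下 -dev 头文件用于在无预编译 wheel 时从源码构建图像相关扩展;
+# 若该架构已有官方 wheel(如 opencv-python-headless、pillow),缺少它们一般不阻碍安装
+# Note (assumption): these -dev headers are only needed when image extensions must be built from source;
+# with prebuilt wheels for this architecture (e.g. opencv-python-headless, pillow), installs work without them.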
+sudo apt install -y \ libopencv-dev \ libjpeg-dev \ libpng-dev \ - libfreetype6-dev \ - libatlas-base-dev \ - libspidev-dev \ - python3-picamera2 \ - python3-numpy + libfreetype6-dev +_apt_pause + +# 树莓派常见;Orange Pi 若无此包可忽略 / Raspberry Pi; skip if unavailable on Orange Pi +if apt-cache show python3-picamera2 >/dev/null 2>&1; then + echo "📦 安装 python3-picamera2..." + sudo apt install -y python3-picamera2 || echo "⚠️ picamera2 安装跳过 / picamera2 install skipped" +else + echo "ℹ️ 未找到 python3-picamera2 软件包,请按板卡文档安装相机栈 / No python3-picamera2 package" +fi +_apt_pause -# Python 版本提示 / Python version tips PY_VER="$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')" -echo "🐍 当前 python3 版本: ${PY_VER}" -echo "ℹ️ 项目要求: Python ^3.10(详见 pyproject.toml)" +echo "🐍 当前 python3 版本 / python3 version: ${PY_VER}" +echo "ℹ️ 项目要求 Python ^3.10(见 pyproject.toml)" -# 安装 Poetry / Install Poetry if ! command -v poetry >/dev/null 2>&1; then - echo "📦 安装 Poetry..." - curl -sSL https://install.python-poetry.org | python3 - + echo "📦 安装 Poetry(官方引导脚本;与 PEP 668 兼容)..." + echo "📦 Installing Poetry via official installer (PEP 668–safe)..." + # 国内外统一用官方脚本,避免在系统 Python 上 pip install poetry 触发 PEP 668 + # Same official bootstrap everywhere; avoids pip install poetry on managed system Python + _poetry_installer="${OGSCOPE_POETRY_INSTALLER_URL:-https://install.python-poetry.org}" + curl -sSL --retry 3 --connect-timeout 30 "${_poetry_installer}" | python3 - fi -# 设置 Poetry 路径 / Set Poetry path export PATH="${HOME}/.local/bin:${PATH}" if ! grep -q 'export PATH="$HOME/.local/bin:$PATH"' "${HOME}/.bashrc" 2>/dev/null; then echo 'export PATH="$HOME/.local/bin:$PATH"' >> "${HOME}/.bashrc" fi -# 验证 Poetry 安装 / Verify Poetry installation poetry --version >/dev/null -echo "✅ Poetry 已安装: $(poetry --version)" +echo "✅ Poetry: $(poetry --version)" + +# 强制使用项目虚拟环境,避免 PEP 668 与系统混装 / Force project venv (avoids PEP 668) +echo "⚙️ 配置 Poetry 虚拟环境 / Configuring Poetry virtualenvs..." +poetry config virtualenvs.create true +poetry config virtualenvs.in-project true +if poetry config virtualenvs.options.system-site-packages true 2>/dev/null; then + echo "✅ virtualenvs.options.system-site-packages = true(可与系统 picamera2 共存 / can see system picamera2)" +else + echo "⚠️ 当前 Poetry 可能不支持 system-site-packages,将仅依赖 PYTHONPATH / Poetry may lack system-site-packages; using PYTHONPATH only" +fi -# 安装 Python 依赖 / Install Python dependencies -echo "📦 安装 Python 依赖..." -poetry install --no-interaction +INSTALL_ARGS=(install --no-interaction) +if [ "${OGSCOPE_INSTALL_DEV:-}" = "1" ]; then + echo "📦 poetry install(含 dev)..." +else + INSTALL_ARGS+=(--only main) + echo "📦 poetry install --only main(生产默认;设 OGSCOPE_INSTALL_DEV=1 可装 dev)..." +fi + +# 低配板:限制并行 wheel 安装数,减轻峰值内存 / Limit parallel installs on low-RAM boards +export POETRY_INSTALLER_MAX_WORKERS="${POETRY_INSTALLER_MAX_WORKERS:-2}" + +if [ "${OGSCOPE_MIRROR_RESOLVED}" = "cn" ]; then + ogscope_export_pypi_mirror_cn +else + ogscope_export_pypi_mirror_international +fi + +poetry "${INSTALL_ARGS[@]}" + +# numpy/scipy 与 lock 一致;Poetry 偶发「无更新」但 wheel 未落盘 / Align deps with lock; retry if missing +if ! ogscope_verify_numpy_scipy; then + echo "⚠️ numpy/scipy 导入失败,使用 --no-cache 重试 poetry install / Import failed; retrying poetry with --no-cache" + poetry "${INSTALL_ARGS[@]}" --no-cache +fi +if ! 
ogscope_verify_numpy_scipy; then + echo "⚠️ 仍缺少 scipy,使用 pip 补装(与 pyproject 版本约束一致)/ scipy still missing; pip install (same constraints)" + poetry run pip install --no-cache-dir "scipy>=1.10,<1.17" +fi +if ! ogscope_verify_numpy_scipy; then + echo "❌ numpy/scipy 仍不可用。请删除 .venv 后重试: rm -rf .venv && ./scripts/install.sh" + echo "❌ Still failing. Try: rm -rf .venv && ./scripts/install.sh" + exit 1 +fi +echo "✅ numpy/scipy 已就绪 / numpy & scipy OK" -# 解析虚拟环境解释器路径 / Resolve virtual environment interpreter path VENV_PATH="$(poetry env info --path)" VENV_PYTHON="${VENV_PATH}/bin/python" if [ ! -x "${VENV_PYTHON}" ]; then - echo "❌ 未找到虚拟环境解释器: ${VENV_PYTHON}" + echo "❌ 未找到虚拟环境解释器 / venv python missing: ${VENV_PYTHON}" exit 1 fi -# 创建必要目录 / Create necessary directories -echo "📁 创建必要目录..." -mkdir -p logs data uploads +echo "📁 创建数据目录 / Creating data directories..." +mkdir -p logs data uploads data/plate_solve data/analysis -# 兼容不同发行版的系统 Python 包路径 / Compatible with system Python package paths of different distributions +# systemd 注入 PYTHONPATH,便于 venv 内 import apt 安装的包 / PYTHONPATH for apt-installed packages in venv PY_PATHS=() [ -d "/usr/lib/python3/dist-packages" ] && PY_PATHS+=("/usr/lib/python3/dist-packages") -[ -d "/usr/local/lib/python3.13/dist-packages" ] && PY_PATHS+=("/usr/local/lib/python3.13/dist-packages") -[ -d "/usr/local/lib/python3.12/dist-packages" ] && PY_PATHS+=("/usr/local/lib/python3.12/dist-packages") -[ -d "/usr/local/lib/python3.11/dist-packages" ] && PY_PATHS+=("/usr/local/lib/python3.11/dist-packages") -[ -d "/usr/local/lib/python3.10/dist-packages" ] && PY_PATHS+=("/usr/local/lib/python3.10/dist-packages") +# 动态加入 /usr/local/lib/pythonX.Y/dist-packages(若存在)/ Add /usr/local dist-packages if present +for _py in 13 12 11 10; do + _d="/usr/local/lib/python3.${_py}/dist-packages" + [ -d "${_d}" ] && PY_PATHS+=("${_d}") +done PYTHONPATH_VALUE="$(IFS=:; echo "${PY_PATHS[*]}")" [ -z "${PYTHONPATH_VALUE}" ] && PYTHONPATH_VALUE="/usr/lib/python3/dist-packages" -LD_LIBRARY_PATH_VALUE="/usr/lib/aarch64-linux-gnu" +# libcamera 等动态库路径(按架构探测)/ Dynamic linker paths for libcamera etc. (arch-detected) +LD_PARTS=() +for _ld in /usr/lib/aarch64-linux-gnu /usr/lib/arm-linux-gnueabihf; do + [ -d "${_ld}" ] && LD_PARTS+=("${_ld}") +done +LD_LIBRARY_PATH_VALUE="$(IFS=:; echo "${LD_PARTS[*]}")" +if [ -z "${LD_LIBRARY_PATH_VALUE}" ]; then + LD_LIBRARY_PATH_VALUE="/usr/lib/aarch64-linux-gnu" + echo "⚠️ 未检测到标准库目录,使用默认 ${LD_LIBRARY_PATH_VALUE} / No lib dir found; using default aarch64 path" +fi -# 生成 systemd 服务 / Generate systemd service -echo "⚙️ 配置 systemd 服务: ${SERVICE_PATH}" +# ExecStart 使用 poetry env info --path(与 virtualenvs.in-project=true 时即项目 .venv),勿手写 ~/.virtualenvs/ +# ExecStart uses poetry env path (project .venv when in-project=true); do not hardcode ~/.virtualenvs/ +echo "⚙️ 写入 systemd: ${SERVICE_PATH}" sudo tee "${SERVICE_PATH}" >/dev/null <&2 + return 1 + fi + # shellcheck disable=SC1091 + . 
/etc/os-release
+    export OGSCOPE_OS_ID="${ID:-unknown}"
+    export OGSCOPE_OS_VERSION_ID="${VERSION_ID:-}"
+    export OGSCOPE_OS_VERSION_CODENAME="${VERSION_CODENAME:-}"
+    export OGSCOPE_OS_PRETTY_NAME="${PRETTY_NAME:-}"
+    export OGSCOPE_OS_ID_LIKE="${ID_LIKE:-}"
+    export OGSCOPE_OS_VARIANT="${VARIANT:-}"
+    export OGSCOPE_OS_VARIANT_ID="${VARIANT_ID:-}"
+    return 0
+}
+
+# 是否为 apt + Debian 系(含 Raspberry Pi OS、Ubuntu、Armbian 等)/ True if apt-based Debian family
+# Raspberry Pi OS 通常 ID=debian;旧版可能为 raspbian / RPi OS is usually ID=debian; older may be raspbian
+ogscope_is_debian_family() {
+    case "${OGSCOPE_OS_ID:-}" in
+        debian | ubuntu | raspbian | linuxmint | pop | zorin | kali)
+            return 0
+            ;;
+    esac
+    case ",${OGSCOPE_OS_ID_LIKE:-}," in
+        *,debian,*) return 0 ;;
+        *,ubuntu,*) return 0 ;;
+    esac
+    return 1
+}
+
+# 安装脚本入口:非 Debian 系则退出,避免误改软件源 / Abort install on non-Debian systems (safety)
+ogscope_require_debian_family_apt() {
+    if ! ogscope_is_debian_family; then
+        echo "❌ 本脚本仅支持 Debian/Ubuntu 系发行版(含 Raspberry Pi OS、Orange Pi Debian 镜像)。" >&2
+        echo "❌ This installer only supports Debian/Ubuntu family (incl. Raspberry Pi OS, Armbian Debian)." >&2
+        echo "   当前 ID=${OGSCOPE_OS_ID:-?} ID_LIKE=${OGSCOPE_OS_ID_LIKE:-?} / Current OS ID shown above." >&2
+        return 1
+    fi
+    if ! command -v apt >/dev/null 2>&1 && ! command -v apt-get >/dev/null 2>&1; then
+        echo "❌ 未找到 apt/apt-get / apt not found" >&2
+        return 1
+    fi
+    return 0
+}
+
+# 打印已识别系统(中英)/ Print detected OS (bilingual)
+ogscope_print_os_summary() {
+    echo "🖥️ 发行版 / OS: ${OGSCOPE_OS_PRETTY_NAME:-${OGSCOPE_OS_ID:-unknown}}"
+    echo "   ID=${OGSCOPE_OS_ID:-?} VERSION_ID=${OGSCOPE_OS_VERSION_ID:-?} CODENAME=${OGSCOPE_OS_VERSION_CODENAME:-?}"
+    if [ -n "${OGSCOPE_OS_VARIANT:-}" ]; then
+        echo "   VARIANT=${OGSCOPE_OS_VARIANT:-} / VARIANT_ID=${OGSCOPE_OS_VARIANT_ID:-}"
+    fi
+    if [ -f /proc/device-tree/model ]; then
+        echo "   硬件型号 / Hardware: $(tr -d '\0' < /proc/device-tree/model 2>/dev/null || echo '?')"
+    fi
+}
+
+# 解析镜像模式,标准输出为 cn 或 international / Resolve mode; prints cn or international
+ogscope_resolve_mirror() {
+    local m="${OGSCOPE_MIRROR:-auto}"
+    case "${m}" in
+        cn | CN | china | China)
+            echo cn
+            return 0
+            ;;
+        # 境外 / Outside mainland China (explicit)
+        international | global | intl | default | us | US | eu | EU)
+            echo international
+            return 0
+            ;;
+        auto | "")
+            ;;
+        *)
+            echo "⚠️ 未知 OGSCOPE_MIRROR=${m},按 auto / Unknown OGSCOPE_MIRROR, using auto" >&2
+            ;;
+    esac
+
+    case "${LANG:-}" in *zh_CN*) echo cn && return 0 ;; esac
+    case "${LC_ALL:-}" in *zh_CN*) echo cn && return 0 ;; esac
+    case "${LC_MESSAGES:-}" in *zh_CN*) echo cn && return 0 ;; esac
+
+    # 时区启发 / Timezone heuristic (common China zones)
+    local tz=""
+    if [ -r /etc/timezone ]; then
+        tz="$(tr -d '\r\n' < /etc/timezone)"
+    elif command -v timedatectl >/dev/null 2>&1; then
+        tz="$(timedatectl show -p Timezone --value 2>/dev/null || true)"
+    fi
+    case "${tz}" in
+        Asia/Shanghai | Asia/Chongqing | Asia/Harbin | Asia/Urumqi | Asia/Hong_Kong | Asia/Macau | Asia/Taipei)
+            echo cn
+            return 0
+            ;;
+    esac
+
+    echo international
+}
+
+# 导出中国大陆 PyPI 环境变量(清华)/ Export env for Tsinghua PyPI mirror
+ogscope_export_pypi_mirror_cn() {
+    export PIP_INDEX_URL="https://pypi.tuna.tsinghua.edu.cn/simple"
+    export PIP_TRUSTED_HOST="pypi.tuna.tsinghua.edu.cn"
+    export UV_INDEX_URL="https://pypi.tuna.tsinghua.edu.cn/simple"
+    # Poetry / urllib 部分场景会读 REQUESTS_*;延长超时利于弱网 / Longer timeout for slow links
+    export POETRY_REQUESTS_TIMEOUT="${POETRY_REQUESTS_TIMEOUT:-120}"
+}
+
+# 取消国内 PyPI 覆盖,使用默认官方索引 / Unset CN overrides; use default PyPI
+ogscope_export_pypi_mirror_international()
{ + unset PIP_INDEX_URL PIP_TRUSTED_HOST UV_INDEX_URL || true +} + +# 将 apt 源替换为清华镜像(需 sudo)/ Replace apt sources with Tsinghua mirror (requires sudo) +ogscope_apply_apt_mirror_cn() { + local stamp + stamp="$(date +%s)" + echo "🌏 配置 apt 使用中国大陆镜像(清华)… / Configuring apt for China mirror (Tsinghua)…" + + if [ ! -d /etc/apt ]; then + echo "⚠️ 未找到 /etc/apt,跳过 apt 镜像 / No /etc/apt, skipping" + return 0 + fi + + sudo cp -a /etc/apt/sources.list "/etc/apt/sources.list.bak.ogscope.${stamp}" 2>/dev/null || true + if [ -d /etc/apt/sources.list.d ]; then + sudo find /etc/apt/sources.list.d -maxdepth 1 -type f \( -name '*.list' -o -name '*.sources' \) -exec \ + cp -a {} {}.bak.ogscope."${stamp}" \; 2>/dev/null || true + fi + + # Ubuntu / Ubuntu ports / Debian 常见写法 / Common Ubuntu & Debian patterns + sudo find /etc/apt -type f \( -name 'sources.list' -o -name '*.list' -o -name '*.sources' \) -print0 2>/dev/null | + while IFS= read -r -d '' f; do + [ -z "${f}" ] && continue + sudo sed -i \ + -e 's|http://archive.ubuntu.com/ubuntu|https://mirrors.tuna.tsinghua.edu.cn/ubuntu|g' \ + -e 's|https://archive.ubuntu.com/ubuntu|https://mirrors.tuna.tsinghua.edu.cn/ubuntu|g' \ + -e 's|http://ports.ubuntu.com/ubuntu-ports|https://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports|g' \ + -e 's|https://ports.ubuntu.com/ubuntu-ports|https://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports|g' \ + -e 's|http://security.ubuntu.com/ubuntu|https://mirrors.tuna.tsinghua.edu.cn/ubuntu|g' \ + -e 's|https://security.ubuntu.com/ubuntu|https://mirrors.tuna.tsinghua.edu.cn/ubuntu|g' \ + -e 's|http://deb.debian.org/debian|https://mirrors.tuna.tsinghua.edu.cn/debian|g' \ + -e 's|https://deb.debian.org/debian|https://mirrors.tuna.tsinghua.edu.cn/debian|g' \ + -e 's|http://security.debian.org/debian-security|https://mirrors.tuna.tsinghua.edu.cn/debian-security|g' \ + -e 's|https://security.debian.org/debian-security|https://mirrors.tuna.tsinghua.edu.cn/debian-security|g' \ + "${f}" 2>/dev/null || true + done + + echo "✅ apt 镜像已写入(已备份 *.bak.ogscope.${stamp})/ Apt mirror applied (backups created)" +} + +# 验证 venv 中 numpy/scipy 可导入 / Verify numpy & scipy import (catches stale Poetry state) +# Poetry 有时显示「无依赖更新」但大 wheel 未实际安装 / Poetry may skip while wheels missing +ogscope_verify_numpy_scipy() { + poetry run python -c "import numpy, scipy" 2>/dev/null +} + +# 若 systemd 已存在但 ExecStart 不是当前 Poetry venv,则修正(避免 ~/.virtualenvs/ 与项目 .venv 混用) +# If unit exists but ExecStart points elsewhere than current Poetry venv, fix it (avoids ~/.virtualenvs vs .venv mismatch) +# 参数 / Args: $1 = unit 文件路径 / unit file path, $2 = venv 内 python 可执行文件绝对路径 / absolute path to venv python +ogscope_sync_systemd_execstart_if_needed() { + local unit_path="${1:?}" + local venv_python="${2:?}" + local expected_line="ExecStart=${venv_python} -m ogscope.main" + + if [ ! -f "${unit_path}" ]; then + echo "ℹ️ 未找到 ${unit_path},跳过 ExecStart 同步(请先运行 install.sh)/ No unit; skip sync (run install.sh first)" + return 0 + fi + if [ ! 
-x "${venv_python}" ]; then + echo "❌ 解释器不可执行 / Python not executable: ${venv_python}" >&2 + return 1 + fi + + local cur_line + cur_line="$(grep '^ExecStart=' "${unit_path}" | head -n1 || true)" + if [ -z "${cur_line}" ]; then + echo "❌ ${unit_path} 中无 ExecStart / No ExecStart in unit" >&2 + return 1 + fi + + if [ "${cur_line}" = "${expected_line}" ]; then + echo "✅ systemd ExecStart 与当前 Poetry venv 一致 / ExecStart matches Poetry venv" + return 0 + fi + + echo "⚙️ 修正 systemd ExecStart(曾指向旧虚拟环境路径)/ Fixing ExecStart (was stale venv path)" + echo " 旧 / Old: ${cur_line}" + echo " 新 / New: ${expected_line}" + sudo sed -i "s|^ExecStart=.*|${expected_line}|" "${unit_path}" + echo "✅ 已更新 ${unit_path} / Unit updated" +} diff --git a/scripts/sync_dev_board.sh b/scripts/sync_dev_board.sh new file mode 100755 index 0000000..f2f979a --- /dev/null +++ b/scripts/sync_dev_board.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# 本地 build 后同步到开发板 / Build locally then rsync to dev board +# 用法 / Usage: +# export OGSCOPE_DEV_USER=ogstartech +# export OGSCOPE_DEV_HOST=192.168.31.16 +# export OGSCOPE_DEV_PATH=/path/to/OGScope +# ./scripts/sync_dev_board.sh +set -euo pipefail +ROOT="$(cd "$(dirname "$0")/.." && pwd)" +cd "$ROOT/web/analysis-ui" +npm run build +if [[ -z "${OGSCOPE_DEV_HOST:-}" || -z "${OGSCOPE_DEV_PATH:-}" ]]; then + echo "请设置 OGSCOPE_DEV_HOST 与 OGSCOPE_DEV_PATH(可选 OGSCOPE_DEV_USER)" >&2 + echo "Set OGSCOPE_DEV_HOST and OGSCOPE_DEV_PATH (optional OGSCOPE_DEV_USER)" >&2 + exit 1 +fi +RSYNC_TARGET="${OGSCOPE_DEV_USER:+$OGSCOPE_DEV_USER@}$OGSCOPE_DEV_HOST:$OGSCOPE_DEV_PATH" +rsync -avz --delete \ + "$ROOT/web/static/analysis-lab/" \ + "$RSYNC_TARGET/web/static/analysis-lab/" +echo "已同步 web/static/analysis-lab/ -> $RSYNC_TARGET/web/static/analysis-lab/" +echo "可选 / Optional: ssh $RSYNC_TARGET 'sudo systemctl restart ogscope'" diff --git a/scripts/uninstall.sh b/scripts/uninstall.sh new file mode 100755 index 0000000..16a0ffa --- /dev/null +++ b/scripts/uninstall.sh @@ -0,0 +1,111 @@ +#!/bin/bash +# OGScope 卸载脚本 / OGScope uninstall script +# 从本机移除 systemd 服务与(可选)项目虚拟环境;不卸载 apt 包与全局 Poetry / Removes service and optional venv; does not remove apt packages or global Poetry +# +# 环境变量 / Environment: +# OGSCOPE_UNINSTALL_CONFIRM=1 — 必须设置,否则脚本退出(防误删)/ Must be set to proceed (safety) +# OGSCOPE_UNINSTALL_KEEP_VENV=1 — 保留项目 .venv / Keep project virtualenv +# OGSCOPE_UNINSTALL_REMOVE_DATA=1 — 同时删除 logs/、uploads/、data/ 下内容(危险)/ Also remove logs, uploads, data (destructive) +# OGSCOPE_UNINSTALL_REMOVE_LEGACY_POETRY_VENV=1 — 删除旧版 Poetry 全局名 venv:~/.virtualenvs/OGScope(若存在)/ Remove legacy Poetry venv at ~/.virtualenvs/OGScope if present + +set -euo pipefail + +if [ "${EUID}" -eq 0 ]; then + echo "❌ 请不要使用 root 用户运行此脚本 / Do not run as root" + exit 1 +fi + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)" +SERVICE_NAME="ogscope" +SERVICE_PATH="/etc/systemd/system/${SERVICE_NAME}.service" + +echo "======================================" +echo " OGScope 卸载 / OGScope uninstall" +echo "======================================" +echo "📁 项目目录 / Project: ${PROJECT_DIR}" + +if [ ! -f "${PROJECT_DIR}/pyproject.toml" ]; then + echo "❌ 未找到 pyproject.toml / pyproject.toml not found" + exit 1 +fi + +# 确认 / Confirmation +if [ "${OGSCOPE_UNINSTALL_CONFIRM:-}" != "1" ]; then + if [ -t 0 ] && [ -t 1 ]; then + echo "" + echo "⚠️ 将停止并移除 systemd 服务 ${SERVICE_NAME},并可选删除 .venv。" + echo "⚠️ Will stop and remove systemd service ${SERVICE_NAME}, and optionally remove .venv." 
+ echo " 数据目录默认保留;设 OGSCOPE_UNINSTALL_REMOVE_DATA=1 可删除 logs/uploads/data。" + echo " Data dirs are kept by default; set OGSCOPE_UNINSTALL_REMOVE_DATA=1 to remove them." + echo "" + read -r -p "输入 YES 继续 / Type YES to continue: " _ans + if [ "${_ans}" != "YES" ]; then + echo "已取消 / Aborted." + exit 0 + fi + else + echo "❌ 非交互环境请设置: OGSCOPE_UNINSTALL_CONFIRM=1 / For non-interactive runs, set OGSCOPE_UNINSTALL_CONFIRM=1" + exit 1 + fi +fi + +cd "${PROJECT_DIR}" + +# 停止并禁用服务 / Stop and disable service +echo "🛑 停止服务 / Stopping service..." +sudo systemctl stop "${SERVICE_NAME}" 2>/dev/null || true +sudo systemctl disable "${SERVICE_NAME}" 2>/dev/null || true + +if [ -f "${SERVICE_PATH}" ]; then + echo "🗑️ 移除 unit 文件 / Removing unit file: ${SERVICE_PATH}" + sudo rm -f "${SERVICE_PATH}" + sudo systemctl daemon-reload + echo "✅ systemd 已更新 / systemd reloaded" +else + echo "ℹ️ 未找到 ${SERVICE_PATH},跳过删除 unit / Unit file not found, skipping" + sudo systemctl daemon-reload 2>/dev/null || true +fi + +# 虚拟环境 / Virtualenv +if [ "${OGSCOPE_UNINSTALL_KEEP_VENV:-}" = "1" ]; then + echo "ℹ️ 保留 .venv(OGSCOPE_UNINSTALL_KEEP_VENV=1)/ Keeping .venv" +elif [ -d "${PROJECT_DIR}/.venv" ]; then + echo "🗑️ 删除虚拟环境 / Removing .venv..." + rm -rf "${PROJECT_DIR}/.venv" + echo "✅ .venv 已删除 / .venv removed" +else + echo "ℹ️ 无 .venv 目录 / No .venv directory" +fi + +# 旧版安装曾将 venv 放在 ~/.virtualenvs/OGScope,与当前「项目内 .venv」并存易混淆;可选删除 / Legacy global venv name; optional cleanup +if [ "${OGSCOPE_UNINSTALL_REMOVE_LEGACY_POETRY_VENV:-}" = "1" ]; then + _legacy_venv="${HOME}/.virtualenvs/OGScope" + if [ -d "${_legacy_venv}" ]; then + echo "🗑️ 删除遗留 Poetry 虚拟环境 / Removing legacy Poetry venv: ${_legacy_venv}" + rm -rf "${_legacy_venv}" + echo "✅ 已删除 / Removed" + else + echo "ℹ️ 无 ${_legacy_venv} / No legacy venv at that path" + fi +else + echo "ℹ️ 若存在旧路径 ~/.virtualenvs/OGScope,可设 OGSCOPE_UNINSTALL_REMOVE_LEGACY_POETRY_VENV=1 一并删除 / Optional: remove legacy ~/.virtualenvs/OGScope" +fi + +# 用户数据(可选)/ Optional user data +if [ "${OGSCOPE_UNINSTALL_REMOVE_DATA:-}" = "1" ]; then + echo "🗑️ 删除 logs、uploads、data(OGSCOPE_UNINSTALL_REMOVE_DATA=1)..." + echo "🗑️ Removing logs, uploads, data..." 
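+    # 注意:data/ 同时包含 data/plate_solve/ 下的 Tetra3 图案库 default_database.npz(不随仓库分发);
+    # 删除后需重新放置该文件,或通过 OGSCOPE_SOLVER_TETRA_DATABASE_PATH 指向其他位置,星图解算才能继续工作
+    # Note: data/ also holds the Tetra3 pattern DB default_database.npz under data/plate_solve/ (not shipped with the repo);
+    # after removal, plate solving needs the file copied back, or OGSCOPE_SOLVER_TETRA_DATABASE_PATH pointing elsewhere.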
+ rm -rf "${PROJECT_DIR}/logs" "${PROJECT_DIR}/uploads" "${PROJECT_DIR}/data" 2>/dev/null || true + echo "✅ 数据目录已清理 / Data dirs removed" +else + echo "ℹ️ 保留 logs/、uploads/、data/(不设 REMOVE_DATA 则保留)/ Keeping logs, uploads, data" +fi + +echo "" +echo "======================================" +echo " ✅ 卸载完成 / Uninstall done" +echo "======================================" +echo "未移除:系统 apt 包、python3-picamera2、全局 Poetry / Not removed: apt packages, picamera2, global Poetry" +echo "若需重装:./scripts/install.sh / To reinstall: ./scripts/install.sh" +echo "" diff --git a/tests/conftest.py b/tests/conftest.py index 666d3f6..1d75ab7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,9 +1,11 @@ """ Pytest 配置和共享 fixtures """ + +from pathlib import Path + import pytest from fastapi.testclient import TestClient -from pathlib import Path from ogscope.web.app import app @@ -26,6 +28,12 @@ def temp_debug_dir(monkeypatch, tmp_path: Path): monkeypatch.setattr(debug_services, "camera_instance", None) monkeypatch.setattr(debug_services, "is_recording", False) monkeypatch.setattr(debug_services, "recording_task", None) + monkeypatch.setattr(debug_services, "recording_stem", None) + monkeypatch.setattr(debug_services, "recording_t0_mono", None) + monkeypatch.setattr(debug_services, "recording_fps_value", 15.0) + monkeypatch.setattr(debug_services, "recording_media_filename", None) + monkeypatch.setattr(debug_services, "recording_codec_fourcc", "mp4v") + monkeypatch.setattr(debug_services, "recording_container", "MP4") monkeypatch.setattr(debug_services, "latest_preview_jpeg", None) monkeypatch.setattr(debug_services, "last_preview_time", None) monkeypatch.setattr(debug_services, "latest_preview_id", 0) @@ -35,13 +43,90 @@ def temp_debug_dir(monkeypatch, tmp_path: Path): @pytest.fixture -def temp_catalog_dir(tmp_path: Path): - """重定向星表目录到临时路径 / Redirect catalog directory to temp path.""" - from ogscope.data.catalog.service import catalog_service - - catalog_root = tmp_path / "catalog" - catalog_service.reconfigure_storage(catalog_root) - return catalog_root +def mock_plate_solve(monkeypatch): + """避免测试依赖 default_database.npz / Avoid tests requiring default_database.npz.""" + + def _fake_solve(self, stars, frame_shape, **kwargs): + from ogscope.algorithms.plate_solve.solver import SolveResult + + return SolveResult( + ra_deg=12.0, + dec_deg=80.0, + detected_stars=len(stars), + solve_source="full", + status="MATCH_FOUND", + status_code=1, + roll_deg=0.0, + fov_deg=16.0, + matches=min(8, len(stars)), + prob=0.001, + rmse_arcsec=10.0, + t_solve_ms=5.0, + t_extract_ms=None, + t_preprocess_ms=1.0, + raw={}, + solve_overlay={ + "frame_shape": [480, 640], + "stars_matched": [ + { + "x": 100.0, + "y": 200.0, + "ra_deg": 12.0, + "dec_deg": 80.0, + "mag": 5.2, + }, + ], + "stars_pattern": [{"x": 110.0, "y": 210.0}], + "stars_all_centroids": [ + {"x": 100.0, "y": 200.0}, + {"x": 300.0, "y": 400.0}, + ], + }, + ) + + def _fake_solve_from_bgr(self, frame_bgr, max_stars, **kwargs): + from ogscope.algorithms.plate_solve.solver import SolveResult + + return SolveResult( + ra_deg=12.0, + dec_deg=80.0, + detected_stars=8, + solve_source="full", + status="MATCH_FOUND", + status_code=1, + roll_deg=0.0, + fov_deg=16.0, + matches=6, + prob=0.001, + rmse_arcsec=10.0, + t_solve_ms=5.0, + t_extract_ms=1.0, + t_preprocess_ms=1.0, + raw={}, + solve_overlay={ + "frame_shape": [480, 640], + "stars_matched": [ + { + "x": 320.0, + "y": 240.0, + "ra_deg": 12.0, + "dec_deg": 80.0, + "mag": 4.5, + }, + ], + "stars_pattern": [{"x": 315.0, 
"y": 235.0}], + "stars_all_centroids": [{"x": 320.0, "y": 240.0}], + }, + ) + + monkeypatch.setattr( + "ogscope.algorithms.plate_solve.solver.PlateSolver.solve", + _fake_solve, + ) + monkeypatch.setattr( + "ogscope.algorithms.plate_solve.solver.PlateSolver.solve_from_bgr_frame", + _fake_solve_from_bgr, + ) @pytest.fixture @@ -61,5 +146,12 @@ def temp_analysis_dir(tmp_path: Path): analysis_service.jobs_root = jobs_root analysis_service.results_root = results_root analysis_service._jobs = {} + # 与实验室清单/实验记录目录一致 / Align lab manifest & experiments with temp dirs + lab = analysis_service._lab + lab.upload_root = upload_root + lab.experiments_root = analysis_root / "experiments" + lab.presets_official = analysis_root / "presets" / "official" + lab.presets_user = analysis_root / "presets" / "user" + for p in (lab.experiments_root, lab.presets_official, lab.presets_user): + p.mkdir(parents=True, exist_ok=True) return analysis_root - diff --git a/tests/integration/test_analysis_pipeline.py b/tests/integration/test_analysis_pipeline.py index efe380a..deffe04 100644 --- a/tests/integration/test_analysis_pipeline.py +++ b/tests/integration/test_analysis_pipeline.py @@ -18,23 +18,10 @@ def _make_frame(path: Path) -> None: @pytest.mark.integration -def test_end_to_end_catalog_and_image_analysis( - client, temp_catalog_dir, temp_analysis_dir, tmp_path: Path +def test_end_to_end_image_analysis( + client, temp_analysis_dir, mock_plate_solve, tmp_path: Path ): - """验证星表到单图解算全链路 / Validate end-to-end catalog to single-image solving.""" - resp_download = client.post( - "/api/catalog/download", - json={"source": "seed", "magnitude_limit": 8.5}, - ) - assert resp_download.status_code == 200 - - resp_index = client.post( - "/api/catalog/build-index", - json={"magnitude_limit": 8.5, "ra_bin_size_deg": 15.0}, - ) - assert resp_index.status_code == 200 - assert resp_index.json()["status"] == "ready" - + """验证上传与单图解算全链路 / Validate upload to single-image solving.""" image = tmp_path / "integration_stars.jpg" _make_frame(image) with image.open("rb") as f: @@ -46,9 +33,13 @@ def test_end_to_end_catalog_and_image_analysis( resp_solve = client.post( "/api/analysis/solve/image", - params={"input_name": "integration_stars.jpg", "hint_ra_deg": 31.0, "hint_dec_deg": 88.0}, + json={ + "input_name": "integration_stars.jpg", + "hint_ra_deg": 31.0, + "hint_dec_deg": 88.0, + }, ) assert resp_solve.status_code == 200 payload = resp_solve.json() assert payload["success"] is True - assert payload["result"]["solve_source"] in {"full", "track"} + assert payload["result"]["solve_source"] == "full" diff --git a/tests/unit/test_analysis_api.py b/tests/unit/test_analysis_api.py index 7e9ff94..030ef65 100644 --- a/tests/unit/test_analysis_api.py +++ b/tests/unit/test_analysis_api.py @@ -35,11 +35,10 @@ def _build_test_video(path: Path) -> None: @pytest.mark.unit -def test_analysis_upload_and_single_image_solve(client, temp_analysis_dir, temp_catalog_dir, tmp_path: Path): +def test_analysis_upload_and_single_image_solve( + client, temp_analysis_dir, mock_plate_solve, tmp_path: Path +): """测试上传与单图解算 / Test upload and single-image solve.""" - client.post("/api/catalog/download", json={"source": "seed"}) - client.post("/api/catalog/build-index", json={"magnitude_limit": 8.5}) - image_path = tmp_path / "stars.jpg" _build_star_image(image_path) with image_path.open("rb") as f: @@ -50,9 +49,28 @@ def test_analysis_upload_and_single_image_solve(client, temp_analysis_dir, temp_ assert upload_resp.status_code == 200 assert 
upload_resp.json()["filename"] == "stars.jpg" + list_resp = client.get("/api/analysis/uploads") + assert list_resp.status_code == 200 + payload = list_resp.json() + assert "upload_dir" in payload + assert "files" in payload + names = [f["filename"] for f in payload["files"]] + assert "stars.jpg" in names + + file_resp = client.get( + "/api/analysis/uploads/file", params={"filename": "stars.jpg"} + ) + assert file_resp.status_code == 200 + assert len(file_resp.content) > 0 + solve_resp = client.post( "/api/analysis/solve/image", - params={"input_name": "stars.jpg", "hint_ra_deg": 45.0, "hint_dec_deg": 70.0}, + json={ + "input_name": "stars.jpg", + "hint_ra_deg": 45.0, + "hint_dec_deg": 70.0, + "centroid": {"sigma": 2.5, "max_area": 400}, + }, ) assert solve_resp.status_code == 200 solve_data = solve_resp.json() @@ -60,15 +78,56 @@ def test_analysis_upload_and_single_image_solve(client, temp_analysis_dir, temp_ result = solve_data["result"] assert "ra_deg" in result assert "dec_deg" in result - assert "confidence" in result + assert "status" in result @pytest.mark.unit -def test_analysis_video_job(client, temp_analysis_dir, temp_catalog_dir, tmp_path: Path): - """测试视频任务分析 / Test video job analysis.""" - client.post("/api/catalog/download", json={"source": "seed"}) - client.post("/api/catalog/build-index", json={"magnitude_limit": 8.5}) +def test_analysis_extract_preview( + client, temp_analysis_dir, monkeypatch, tmp_path: Path +): + """提星掩膜预览接口 / Extract preview endpoint smoke test.""" + image_path = tmp_path / "stars2.jpg" + _build_star_image(image_path) + with image_path.open("rb") as f: + upload_resp = client.post( + "/api/analysis/upload", + files={"file": ("stars2.jpg", f, "image/jpeg")}, + ) + assert upload_resp.status_code == 200 + + def _fake_preview(*_a: object, **_kw: object) -> dict: + return { + "success": True, + "detected_stars": 5, + "t_extract_ms": 10.0, + "binary_mask_png_base64": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==", + "solve_width": 320, + "solve_height": 480, + "original_width": 320, + "original_height": 480, + } + monkeypatch.setattr( + "ogscope.web.api.analysis.services.centroid_extraction_preview", + _fake_preview, + ) + prev_resp = client.post( + "/api/analysis/extract/preview", + json={"input_name": "stars2.jpg", "max_image_side": 2048}, + ) + assert prev_resp.status_code == 200 + data = prev_resp.json() + assert data.get("success") is True + assert data.get("detected_stars") == 5 + assert data.get("t_extract_ms") == 10.0 + assert data.get("binary_mask_png_base64") + + +@pytest.mark.unit +def test_analysis_video_job( + client, temp_analysis_dir, mock_plate_solve, tmp_path: Path +): + """测试视频任务分析 / Test video job analysis.""" video_path = tmp_path / "stars.mp4" _build_test_video(video_path) with video_path.open("rb") as f: @@ -102,3 +161,123 @@ def test_analysis_video_job(client, temp_analysis_dir, temp_catalog_dir, tmp_pat result_data = result_resp.json() assert result_data["job_id"] == job_data["job_id"] assert len(result_data["results"]) > 0 + + +@pytest.mark.unit +def test_analysis_list_presets_and_batch( + client, temp_analysis_dir, mock_plate_solve, tmp_path: Path +): + """预设列表与批量解算 / Presets list and batch solve.""" + image_path = tmp_path / "batch.jpg" + _build_star_image(image_path) + with image_path.open("rb") as f: + up = client.post( + "/api/analysis/upload", + files={"file": ("batch.jpg", f, "image/jpeg")}, + ) + assert up.status_code == 200 + + pr = client.get("/api/analysis/presets", 
params={"scope": "user"}) + assert pr.status_code == 200 + assert "presets" in pr.json() + + create = client.post( + "/api/analysis/presets", + json={ + "name": "test-preset", + "params": {"fov_estimate": 16.0, "solve_timeout_ms": 8000}, + }, + ) + assert create.status_code == 200 + pid = create.json()["id"] + + batch = client.post( + "/api/analysis/solve/batch", + json={ + "input_name": "batch.jpg", + "runs": [ + {"label": "A", "params": {"fov_estimate": 16.0}}, + {"label": "B", "params": {"fov_estimate": 15.0}}, + ], + }, + ) + assert batch.status_code == 200 + bj = batch.json() + assert bj["input_name"] == "batch.jpg" + assert len(bj["results"]) == 2 + + exp = client.post( + "/api/analysis/experiments", + json={ + "input_name": "batch.jpg", + "preset_label": "A", + "result_json": {"ok": True}, + "metrics": {"matches": 1}, + }, + ) + assert exp.status_code == 200 + + el = client.get("/api/analysis/experiments", params={"page": 1, "page_size": 10}) + assert el.status_code == 200 + assert el.json()["total"] >= 1 + + dl = client.delete(f"/api/analysis/presets/{pid}") + assert dl.status_code == 200 + + +@pytest.mark.unit +def test_analysis_upload_file_info_sidecar(client, temp_analysis_dir, tmp_path: Path): + """上传素材 info 接口合并 stem.txt / Upload info merges sidecar JSON.""" + image_path = tmp_path / "cap.jpg" + _build_star_image(image_path) + with image_path.open("rb") as f: + up = client.post( + "/api/analysis/upload", + files={"file": ("cap.jpg", f, "image/jpeg")}, + ) + assert up.status_code == 200 + side = temp_analysis_dir / "uploads" / "cap.txt" + side.write_text( + '{"camera": {"exposure_us": 5000, "output_width": 640, "output_height": 480}}', + encoding="utf-8", + ) + info_resp = client.get("/api/analysis/uploads/cap.jpg/info") + assert info_resp.status_code == 200 + data = info_resp.json() + assert data.get("exposure_us") == 5000 + assert "640x480" in str(data.get("resolution", "")) + + +@pytest.mark.unit +def test_analysis_delete_upload_and_experiment( + client, temp_analysis_dir, tmp_path: Path +): + """删除素材与实验记录 / Delete upload and experiment.""" + image_path = tmp_path / "del.jpg" + _build_star_image(image_path) + with image_path.open("rb") as f: + up = client.post( + "/api/analysis/upload", + files={"file": ("del.jpg", f, "image/jpeg")}, + ) + assert up.status_code == 200 + assert (temp_analysis_dir / "uploads" / "del.jpg").is_file() + + dr = client.delete("/api/analysis/uploads/del.jpg") + assert dr.status_code == 200 + assert not (temp_analysis_dir / "uploads" / "del.jpg").is_file() + + exp = client.post( + "/api/analysis/experiments", + json={ + "input_name": "x.jpg", + "preset_label": "t", + "result_json": {"ok": True}, + "metrics": {"matches": 0}, + }, + ) + assert exp.status_code == 200 + eid = exp.json()["id"] + er = client.delete(f"/api/analysis/experiments/{eid}") + assert er.status_code == 200 + assert not (temp_analysis_dir / "experiments" / f"{eid}.json").is_file() diff --git a/tests/unit/test_api.py b/tests/unit/test_api.py index b751483..d8df904 100644 --- a/tests/unit/test_api.py +++ b/tests/unit/test_api.py @@ -1,6 +1,7 @@ """ Web API 单元测试 """ + import pytest @@ -15,11 +16,11 @@ def test_root(client): @pytest.mark.unit def test_debug_analysis_page(client): - """测试星图解算调试页面。 / Test plate solve debug page.""" + """测试星空解算控制台页面。 / Test plate solve console page.""" response = client.get("/debug/analysis") assert response.status_code == 200 assert "text/html" in response.headers.get("content-type", "") - assert "星图解算工作台" in response.text + assert "星空解算" in response.text 
@pytest.mark.unit @@ -42,7 +43,6 @@ def test_app_api_root(client): assert data["docs"] == "/docs" - @pytest.mark.unit def test_camera_status(client): """测试获取相机状态接口结构。 / Test to obtain the camera status interface structure.""" @@ -70,4 +70,3 @@ def test_system_info(client): assert "wifi_interface" in data assert "uptime_seconds" in data assert "load_average_1m" in data - diff --git a/tests/unit/test_catalog_api.py b/tests/unit/test_catalog_api.py deleted file mode 100644 index 5d9e258..0000000 --- a/tests/unit/test_catalog_api.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -星表 API 测试 / Catalog API tests -""" - -from pathlib import Path - -import pytest - - -@pytest.mark.unit -def test_catalog_download_build_and_status(client, temp_catalog_dir): - """测试星表下载、建索引和状态查询 / Test catalog download, build index and status.""" - download_resp = client.post( - "/api/catalog/download", - json={"source": "seed", "magnitude_limit": 8.5}, - ) - assert download_resp.status_code == 200 - download_data = download_resp.json() - assert download_data["success"] is True - assert "path" in download_data - - build_resp = client.post( - "/api/catalog/build-index", - json={"magnitude_limit": 8.5, "ra_bin_size_deg": 30.0}, - ) - assert build_resp.status_code == 200 - build_data = build_resp.json() - assert build_data["success"] is True - assert build_data["record_count"] > 0 - assert build_data["bucket_count"] > 0 - - status_resp = client.get("/api/catalog/status") - assert status_resp.status_code == 200 - status_data = status_resp.json() - assert status_data["ready"] is True - assert status_data["status"] == "ready" - - -@pytest.mark.unit -def test_catalog_star_crud(client, temp_catalog_dir): - """测试星点 CRUD 接口 / Test catalog star CRUD APIs.""" - create_resp = client.post( - "/api/catalog/stars", - json={ - "source_id": "custom_star_001", - "ra": 123.4, - "dec": 45.6, - "pmra": 0.3, - "pmdec": -0.2, - "phot_g_mean_mag": 6.7, - "name_en": "Custom Star", - "name_zh": "自定义恒星", - "description_en": "Custom star for test", - "description_zh": "测试用自定义恒星", - }, - ) - assert create_resp.status_code == 200 - create_data = create_resp.json() - assert create_data["source_id"] == "custom_star_001" - assert create_data["name_zh"] == "自定义恒星" - - get_resp = client.get("/api/catalog/stars/custom_star_001") - assert get_resp.status_code == 200 - assert get_resp.json()["phot_g_mean_mag"] == 6.7 - - list_resp = client.get("/api/catalog/stars", params={"source_query": "custom_star"}) - assert list_resp.status_code == 200 - list_data = list_resp.json() - assert list_data["total"] >= 1 - - update_resp = client.put( - "/api/catalog/stars/custom_star_001", - json={ - "source_id": "custom_star_001", - "ra": 124.5, - "dec": 44.4, - "pmra": 0.4, - "pmdec": -0.1, - "phot_g_mean_mag": 5.8, - "name_en": "Custom Star Updated", - "name_zh": "自定义恒星更新", - "description_en": "Updated custom star", - "description_zh": "更新后的自定义恒星", - }, - ) - assert update_resp.status_code == 200 - assert update_resp.json()["phot_g_mean_mag"] == 5.8 - assert update_resp.json()["name_en"] == "Custom Star Updated" - - delete_resp = client.delete("/api/catalog/stars/custom_star_001") - assert delete_resp.status_code == 200 - assert delete_resp.json()["success"] is True - - -@pytest.mark.unit -def test_catalog_db_auto_recovery_on_malformed(tmp_path: Path): - """测试损坏数据库自动恢复 / Test auto recovery when SQLite is malformed.""" - from ogscope.data.catalog.service import CatalogService - - broken_catalog_dir = tmp_path / "broken_catalog" - broken_catalog_dir.mkdir(parents=True, 
exist_ok=True) - db_path = broken_catalog_dir / "stars.db" - db_path.write_bytes(b"this is not a sqlite database") - - service = CatalogService() - service.reconfigure_storage(broken_catalog_dir) - status = service.get_status() - - assert Path(status["db_path"]).exists() - backups = list(broken_catalog_dir.glob("stars_corrupt_*.db")) - assert backups, "应当生成损坏数据库备份 / Corrupted DB backup should be created" diff --git a/tests/unit/test_debug_camera_api.py b/tests/unit/test_debug_camera_api.py index f51d757..4b2c8aa 100644 --- a/tests/unit/test_debug_camera_api.py +++ b/tests/unit/test_debug_camera_api.py @@ -1,6 +1,7 @@ """ 调试相机 API 的第二层最小测试网(无真实硬件依赖)。 """ + import pytest @@ -117,13 +118,17 @@ async def _noop(): monkeypatch.setattr(debug_services, "get_camera_instance", _get_camera_instance) monkeypatch.setattr( - debug_services.DebugCameraService, "_ensure_preview_grabber", staticmethod(_noop) + debug_services.DebugCameraService, + "_ensure_preview_grabber", + staticmethod(_noop), ) monkeypatch.setattr( debug_services.DebugCameraService, "_stop_preview_grabber", staticmethod(_noop) ) monkeypatch.setattr( - debug_services.DebugCameraService, "_restart_preview_grabber", staticmethod(_noop) + debug_services.DebugCameraService, + "_restart_preview_grabber", + staticmethod(_noop), ) return camera @@ -237,4 +242,3 @@ def test_debug_camera_white_balance_switch_success(client, fake_camera_env): body = response.json() assert body["success"] is True assert fake_camera_env.white_balance_mode == "night" - diff --git a/tests/unit/test_debug_files_api.py b/tests/unit/test_debug_files_api.py index 9980b34..cb61f3a 100644 --- a/tests/unit/test_debug_files_api.py +++ b/tests/unit/test_debug_files_api.py @@ -1,6 +1,7 @@ """ 调试文件 API 的最小回归测试。 """ + import json import pytest @@ -54,4 +55,3 @@ def test_debug_files_delete_removes_image_and_info(client, temp_debug_dir): assert "message_key" in delete_resp.json() assert not image_path.exists() assert not info_path.exists() - diff --git a/tests/unit/test_debug_presets_api.py b/tests/unit/test_debug_presets_api.py index 223b1c2..8781a92 100644 --- a/tests/unit/test_debug_presets_api.py +++ b/tests/unit/test_debug_presets_api.py @@ -1,6 +1,7 @@ """ 调试预设 API 的最小回归测试。 """ + import pytest @@ -74,4 +75,3 @@ def test_debug_presets_delete(client, temp_debug_dir): get_resp = client.get("/api/debug/camera/presets") assert get_resp.status_code == 200 assert get_resp.json()["presets"] == [] - diff --git a/tests/unit/test_plate_large_scale_bg.py b/tests/unit/test_plate_large_scale_bg.py new file mode 100644 index 0000000..e10f22b --- /dev/null +++ b/tests/unit/test_plate_large_scale_bg.py @@ -0,0 +1,27 @@ +""" +大尺度背景减除单元测试 / Unit tests for large-scale background flattening. 
+""" + +import numpy as np +import pytest + +from ogscope.algorithms.plate_solve.solver import subtract_large_scale_background_bgr + + +@pytest.mark.unit +def test_subtract_large_scale_background_bgr_shape_and_range() -> None: + """输出与输入同形且值域在 uint8 / Output shape matches and values in uint8 range.""" + h, w = 120, 160 + bgr = np.zeros((h, w, 3), dtype=np.uint8) + bgr[:, :, 1] = np.linspace(0, 80, w, dtype=np.uint8) + out = subtract_large_scale_background_bgr(bgr, downsample_max_side=64) + assert out.shape == bgr.shape + assert out.dtype == np.uint8 + assert int(out.min()) >= 0 and int(out.max()) <= 255 + + +@pytest.mark.unit +def test_subtract_large_scale_background_bgr_non_bgr_passthrough() -> None: + """非三通道图原样返回 / Non-3-channel frames pass through unchanged.""" + gray = np.zeros((10, 10), dtype=np.uint8) + assert subtract_large_scale_background_bgr(gray, downsample_max_side=32) is gray diff --git a/tests/unit/test_realtime_api.py b/tests/unit/test_realtime_api.py index 80ef464..bd9d90f 100644 --- a/tests/unit/test_realtime_api.py +++ b/tests/unit/test_realtime_api.py @@ -23,7 +23,7 @@ def get_video_frame(self): @pytest.mark.unit -def test_realtime_solver_status_endpoints(client, monkeypatch): +def test_realtime_solver_status_endpoints(client, monkeypatch, mock_plate_solve): """测试实时解算启停接口 / Test realtime solver start and stop endpoints.""" from ogscope.web.api.debug import routes as debug_routes diff --git a/tests/unit/test_solver_performance_baseline.py b/tests/unit/test_solver_performance_baseline.py index 4d8fb9e..f89ca9f 100644 --- a/tests/unit/test_solver_performance_baseline.py +++ b/tests/unit/test_solver_performance_baseline.py @@ -10,11 +10,12 @@ import numpy as np import pytest -from ogscope.algorithms.plate_solve import PlateSolver from ogscope.algorithms.star_extract import StarExtractor -def _synthetic_frame(width: int = 640, height: int = 360, stars: int = 60) -> np.ndarray: +def _synthetic_frame( + width: int = 640, height: int = 360, stars: int = 60 +) -> np.ndarray: """生成合成星空帧 / Generate synthetic star field frame.""" frame = np.zeros((height, width, 3), dtype=np.uint8) rng = np.random.default_rng(42) @@ -27,27 +28,17 @@ def _synthetic_frame(width: int = 640, height: int = 360, stars: int = 60) -> np @pytest.mark.unit @pytest.mark.slow -def test_extract_and_solve_performance_baseline(): - """校验基础性能阈值 / Validate baseline performance threshold.""" +def test_star_extract_performance_baseline(): + """星点提取性能基线(不加载 Tetra 数据库)/ Star extraction baseline without Tetra DB.""" extractor = StarExtractor(max_stars=80) - solver = PlateSolver(fov_deg=16.0) frame = _synthetic_frame() rounds = 40 start = time.perf_counter() for _ in range(rounds): stars = extractor.extract(frame) - solved = solver.solve( - stars=stars, - frame_shape=frame.shape, - hint_ra_deg=12.0, - hint_dec_deg=86.0, - solve_source="full", - ) - assert 0.0 <= solved.ra_deg <= 360.0 - assert -90.0 <= solved.dec_deg <= 90.0 + assert len(stars) >= 0 elapsed = time.perf_counter() - start avg_ms = (elapsed / rounds) * 1000.0 - # Pi Zero 上阈值会更高,这里以开发机回归检测为主 / Threshold is higher on Pi Zero; here we use dev-machine regression guard assert avg_ms < 35.0 diff --git a/web/analysis-ui/README.md b/web/analysis-ui/README.md new file mode 100644 index 0000000..d0cb231 --- /dev/null +++ b/web/analysis-ui/README.md @@ -0,0 +1,36 @@ +# 星空解算控制台前端 / Plate Solve Console UI + +## 用途 / Purpose + +- 技术栈:**Vite 5 + React 18 + TypeScript + Tailwind CSS**。 +- 入口页面由 FastAPI 在 **`GET /debug/analysis`** 提供:若存在 `web/static/analysis-lab/index.html` 
则返回 SPA,否则回退旧版 Jinja 模板。 +- 静态资源由 FastAPI 挂载 **`/static`**,本应用 `base` 为 **`/static/analysis-lab/`**。 +- 文案 i18n:**`web/static/i18n/analysis.zh.json`**、**`analysis.en.json`**;开发时 Vite 将 **`/static`** 代理到 FastAPI 以便加载上述 JSON。 + +## 常用命令 / Commands + +```bash +cd web/analysis-ui +npm install # 安装依赖 / Install deps +npm run dev # 开发服务器(见下)/ Dev server +npm run build # 生产构建,输出到 ../static/analysis-lab/ +``` + +## 构建产物 / Build output + +- 目录:**`web/static/analysis-lab/`**(`index.html` + `assets/`)。 +- 部署到开发板前需包含该目录(本机构建后提交,或由 CI `npm ci && npm run build` 生成)。 + +## 本地联调 / Local API + +- `vite.config.ts` 中配置了 **`/api`** 与 **`/static`** → `http://127.0.0.1:8000` 的代理;需同时启动 OGScope 后端(默认 8000 端口)。 +- 开发时访问地址形如:`http://127.0.0.1:5173/static/analysis-lab/`(以终端输出为准)。 + +## 同步开发板 / Sync to board + +- 脚本:**`scripts/sync_dev_board.sh`**(先 `npm run build`,再 `rsync`)。 +- 环境变量:`OGSCOPE_DEV_HOST`、`OGSCOPE_DEV_PATH`(可选 `OGSCOPE_DEV_USER`)。 + +## CI + +- **`.github/workflows/ci.yml`** 在 pytest 前执行 `web/analysis-ui` 下的 `npm ci` 与 `npm run build`。 diff --git a/web/analysis-ui/index.html b/web/analysis-ui/index.html new file mode 100644 index 0000000..9c58106 --- /dev/null +++ b/web/analysis-ui/index.html @@ -0,0 +1,12 @@ + + + + + + OGScope 星空解算控制台 + + +
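The "本地联调 / Local API" notes in web/analysis-ui/README.md above describe `vite.config.ts` only in prose. As a rough illustration of how those pieces (the `/static/analysis-lab/` base path, the build output directory, and the dev proxy to the FastAPI backend) could fit together, here is a minimal hypothetical sketch — the actual `vite.config.ts` is not included in this excerpt, the option values simply restate the README, and the narrowed `/static/i18n` proxy path is an assumption made for illustration.

```ts
// Hypothetical vite.config.ts sketch (the real file is not shown in this diff).
// Values follow web/analysis-ui/README.md: base and outDir match the FastAPI
// static mount, and the dev proxy points at the OGScope backend on port 8000.
import { defineConfig } from "vite";
import react from "@vitejs/plugin-react";

export default defineConfig({
  plugins: [react()],
  // Production assets are served by FastAPI under /static/analysis-lab/.
  base: "/static/analysis-lab/",
  build: {
    // npm run build emits into web/static/analysis-lab/ (index.html + assets/).
    outDir: "../static/analysis-lab",
    emptyOutDir: true,
  },
  server: {
    proxy: {
      // Forward API calls and the i18n JSON to the locally running backend.
      "/api": "http://127.0.0.1:8000",
      // Assumption: scoped narrower than the README's "/static" for this sketch.
      "/static/i18n": "http://127.0.0.1:8000",
    },
  },
});
```

Because the build output is plain static files committed under `web/static/analysis-lab/`, the dev board only needs that directory at deploy time; no Node runtime is required on the device.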
+ + + diff --git a/web/analysis-ui/package-lock.json b/web/analysis-ui/package-lock.json new file mode 100644 index 0000000..617326b --- /dev/null +++ b/web/analysis-ui/package-lock.json @@ -0,0 +1,2676 @@ +{ + "name": "ogscope-analysis-lab", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "ogscope-analysis-lab", + "version": "0.1.0", + "dependencies": { + "lucide-react": "^0.460.0", + "react": "^18.3.1", + "react-dom": "^18.3.1" + }, + "devDependencies": { + "@types/react": "^18.3.12", + "@types/react-dom": "^18.3.1", + "@vitejs/plugin-react": "^4.3.3", + "autoprefixer": "^10.4.20", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.15", + "typescript": "~5.6.2", + "vite": "^5.4.10" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmmirror.com/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": 
"https://registry.npmmirror.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmmirror.com/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmmirror.com/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmmirror.com/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.29.2", + "resolved": 
"https://registry.npmmirror.com/@babel/helpers/-/helpers-7.29.2.tgz", + "integrity": "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.2", + "resolved": "https://registry.npmmirror.com/@babel/parser/-/parser-7.29.2.tgz", + "integrity": "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": 
"sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + 
"linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": 
"https://registry.npmmirror.com/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmmirror.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmmirror.com/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmmirror.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmmirror.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmmirror.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmmirror.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmmirror.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.27", + "resolved": "https://registry.npmmirror.com/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", + "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.0.tgz", + "integrity": "sha512-WOhNW9K8bR3kf4zLxbfg6Pxu2ybOUbB2AjMDHSQx86LIF4rH4Ft7vmMwNt0loO0eonglSNy4cpD3MKXXKQu0/A==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.0.tgz", + "integrity": "sha512-u6JHLll5QKRvjciE78bQXDmqRqNs5M/3GVqZeMwvmjaNODJih/WIrJlFVEihvV0MiYFmd+ZyPr9wxOVbPAG2Iw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.0.tgz", + "integrity": "sha512-qEF7CsKKzSRc20Ciu2Zw1wRrBz4g56F7r/vRwY430UPp/nt1x21Q/fpJ9N5l47WWvJlkNCPJz3QRVw008fi7yA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + 
"node_modules/@rollup/rollup-darwin-x64": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.0.tgz", + "integrity": "sha512-WADYozJ4QCnXCH4wPB+3FuGmDPoFseVCUrANmA5LWwGmC6FL14BWC7pcq+FstOZv3baGX65tZ378uT6WG8ynTw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.0.tgz", + "integrity": "sha512-6b8wGHJlDrGeSE3aH5mGNHBjA0TTkxdoNHik5EkvPHCt351XnigA4pS7Wsj/Eo9Y8RBU6f35cjN9SYmCFBtzxw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.0.tgz", + "integrity": "sha512-h25Ga0t4jaylMB8M/JKAyrvvfxGRjnPQIR8lnCayyzEjEOx2EJIlIiMbhpWxDRKGKF8jbNH01NnN663dH638mA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.0.tgz", + "integrity": "sha512-RzeBwv0B3qtVBWtcuABtSuCzToo2IEAIQrcyB/b2zMvBWVbjo8bZDjACUpnaafaxhTw2W+imQbP2BD1usasK4g==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.0.tgz", + "integrity": "sha512-Sf7zusNI2CIU1HLzuu9Tc5YGAHEZs5Lu7N1ssJG4Tkw6e0MEsN7NdjUDDfGNHy2IU+ENyWT+L2obgWiguWibWQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.0.tgz", + "integrity": "sha512-DX2x7CMcrJzsE91q7/O02IJQ5/aLkVtYFryqCjduJhUfGKG6yJV8hxaw8pZa93lLEpPTP/ohdN4wFz7yp/ry9A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.0.tgz", + "integrity": "sha512-09EL+yFVbJZlhcQfShpswwRZ0Rg+z/CsSELFCnPt3iK+iqwGsI4zht3secj5vLEs957QvFFXnzAT0FFPIxSrkQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.0.tgz", + "integrity": "sha512-i9IcCMPr3EXm8EQg5jnja0Zyc1iFxJjZWlb4wr7U2Wx/GrddOuEafxRdMPRYVaXjgbhvqalp6np07hN1w9kAKw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.0.tgz", + "integrity": 
"sha512-DGzdJK9kyJ+B78MCkWeGnpXJ91tK/iKA6HwHxF4TAlPIY7GXEvMe8hBFRgdrR9Ly4qebR/7gfUs9y2IoaVEyog==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.0.tgz", + "integrity": "sha512-RwpnLsqC8qbS8z1H1AxBA1H6qknR4YpPR9w2XX0vo2Sz10miu57PkNcnHVaZkbqyw/kUWfKMI73jhmfi9BRMUQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.0.tgz", + "integrity": "sha512-Z8pPf54Ly3aqtdWC3G4rFigZgNvd+qJlOE52fmko3KST9SoGfAdSRCwyoyG05q1HrrAblLbk1/PSIV+80/pxLg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.0.tgz", + "integrity": "sha512-3a3qQustp3COCGvnP4SvrMHnPQ9d1vzCakQVRTliaz8cIp/wULGjiGpbcqrkv0WrHTEp8bQD/B3HBjzujVWLOA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.0.tgz", + "integrity": "sha512-pjZDsVH/1VsghMJ2/kAaxt6dL0psT6ZexQVrijczOf+PeP2BUqTHYejk3l6TlPRydggINOeNRhvpLa0AYpCWSQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.0.tgz", + "integrity": "sha512-3ObQs0BhvPgiUVZrN7gqCSvmFuMWvWvsjG5ayJ3Lraqv+2KhOsp+pUbigqbeWqueGIsnn+09HBw27rJ+gYK4VQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.0.tgz", + "integrity": "sha512-EtylprDtQPdS5rXvAayrNDYoJhIz1/vzN2fEubo3yLE7tfAw+948dO0g4M0vkTVFhKojnF+n6C8bDNe+gDRdTg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.0.tgz", + "integrity": "sha512-k09oiRCi/bHU9UVFqD17r3eJR9bn03TyKraCrlz5ULFJGdJGi7VOmm9jl44vOJvRJ6P7WuBi/s2A97LxxHGIdw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.0.tgz", + "integrity": "sha512-1o/0/pIhozoSaDJoDcec+IVLbnRtQmHwPV730+AOD29lHEEo4F5BEUB24H0OBdhbBBDwIOSuf7vgg0Ywxdfiiw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + 
"node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.0.tgz", + "integrity": "sha512-pESDkos/PDzYwtyzB5p/UoNU/8fJo68vcXM9ZW2V0kjYayj1KaaUfi1NmTUTUpMn4UhU4gTuK8gIaFO4UGuMbA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.0.tgz", + "integrity": "sha512-hj1wFStD7B1YBeYmvY+lWXZ7ey73YGPcViMShYikqKT1GtstIKQAtfUI6yrzPjAy/O7pO0VLXGmUVWXQMaYgTQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.0.tgz", + "integrity": "sha512-SyaIPFoxmUPlNDq5EHkTbiKzmSEmq/gOYFI/3HHJ8iS/v1mbugVa7dXUzcJGQfoytp9DJFLhHH4U3/eTy2Bq4w==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.0.tgz", + "integrity": "sha512-RdcryEfzZr+lAr5kRm2ucN9aVlCCa2QNq4hXelZxb8GG0NJSazq44Z3PCCc8wISRuCVnGs0lQJVX5Vp6fKA+IA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.0.tgz", + "integrity": "sha512-PrsWNQ8BuE00O3Xsx3ALh2Df8fAj9+cvvX9AIA6o4KpATR98c9mud4XtDWVvsEuyia5U4tVSTKygawyJkjm60w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmmirror.com/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmmirror.com/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmmirror.com/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmmirror.com/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": 
"sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmmirror.com/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/prop-types": { + "version": "15.7.15", + "resolved": "https://registry.npmmirror.com/@types/prop-types/-/prop-types-15.7.15.tgz", + "integrity": "sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "18.3.28", + "resolved": "https://registry.npmmirror.com/@types/react/-/react-18.3.28.tgz", + "integrity": "sha512-z9VXpC7MWrhfWipitjNdgCauoMLRdIILQsAEV+ZesIzBq/oUlxk0m3ApZuMFCXdnS4U7KrI+l3WRUEGQ8K1QKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.3.7", + "resolved": "https://registry.npmmirror.com/@types/react-dom/-/react-dom-18.3.7.tgz", + "integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^18.0.0" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.7.0", + "resolved": "https://registry.npmmirror.com/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz", + "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.27", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.17.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmmirror.com/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmmirror.com/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, + "node_modules/autoprefixer": { + "version": "10.4.27", + "resolved": "https://registry.npmmirror.com/autoprefixer/-/autoprefixer-10.4.27.tgz", + "integrity": "sha512-NP9APE+tO+LuJGn7/9+cohklunJsXWiaWEfV3si4Gi/XHDwVNgkwr1J3RQYFIvPy76GmJ9/bW8vyoU1LcxwKHA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, 
+ { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001774", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.12", + "resolved": "https://registry.npmmirror.com/baseline-browser-mapping/-/baseline-browser-mapping-2.10.12.tgz", + "integrity": "sha512-qyq26DxfY4awP2gIRXhhLWfwzwI+N5Nxk6iQi8EFizIaWIjqicQTE4sLnZZVdeKPRcVNoJOkkpfzoIYuvCKaIQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmmirror.com/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmmirror.com/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001782", + "resolved": "https://registry.npmmirror.com/caniuse-lite/-/caniuse-lite-1.0.30001782.tgz", + "integrity": "sha512-dZcaJLJeDMh4rELYFw1tvSn1bhZWYFOt468FcbHHxx/Z/dFidd1I6ciyFdi3iwfQCyOjqo9upF6lGQYtMiJWxw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": 
"CC-BY-4.0" + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmmirror.com/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmmirror.com/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmmirror.com/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmmirror.com/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmmirror.com/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.328", + "resolved": "https://registry.npmmirror.com/electron-to-chromium/-/electron-to-chromium-1.5.328.tgz", + "integrity": 
"sha512-QNQ5l45DzYytThO21403XN3FvK0hOkWDG8viNf6jqS42msJ8I4tGDSpBCgvDRRPnkffafiwAym2X2eHeGD2V0w==", + "dev": true, + "license": "ISC" + }, + "node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmmirror.com/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmmirror.com/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmmirror.com/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": 
true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmmirror.com/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmmirror.com/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + 
}, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmmirror.com/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmmirror.com/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmmirror.com/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmmirror.com/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmmirror.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lucide-react": { + "version": "0.460.0", + "resolved": "https://registry.npmmirror.com/lucide-react/-/lucide-react-0.460.0.tgz", + "integrity": "sha512-BVtq/DykVeIvRTJvRAgCsOwaGL8Un3Bxh8MbDxMhEWlZay3T4IpEKDEpwt5KZ0KJMHzgm6jrltxlT5eXOWXDHg==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmmirror.com/merge2/-/merge2-1.4.1.tgz", + "integrity": 
"sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmmirror.com/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmmirror.com/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmmirror.com/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-releases": { + "version": "2.0.36", + "resolved": "https://registry.npmmirror.com/node-releases/-/node-releases-2.0.36.tgz", + "integrity": "sha512-TdC8FSgHz8Mwtw9g5L4gR/Sh9XhSP/0DEkQxfEFXOpiul5IiHgHan2VhYYb6agDSfp4KuvltmGApc8HMgUrIkA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + 
"license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.2", + "resolved": "https://registry.npmmirror.com/picomatch/-/picomatch-2.3.2.tgz", + "integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmmirror.com/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/postcss": { + "version": "8.5.8", + "resolved": "https://registry.npmmirror.com/postcss/-/postcss-8.5.8.tgz", + "integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmmirror.com/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/postcss-js/-/postcss-js-4.1.0.tgz", + "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + 
"peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmmirror.com/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmmirror.com/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmmirror.com/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmmirror.com/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmmirror.com/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-refresh": { + "version": "0.17.0", + "resolved": "https://registry.npmmirror.com/react-refresh/-/react-refresh-0.17.0.tgz", + "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + 
"node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmmirror.com/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmmirror.com/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rollup": { + "version": "4.60.0", + "resolved": "https://registry.npmmirror.com/rollup/-/rollup-4.60.0.tgz", + "integrity": "sha512-yqjxruMGBQJ2gG4HtjZtAfXArHomazDHoFwFFmZZl0r7Pdo7qCIXKqKHZc8yeoMgzJJ+pO6pEEHa+V7uzWlrAQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.60.0", + "@rollup/rollup-android-arm64": "4.60.0", + "@rollup/rollup-darwin-arm64": "4.60.0", + "@rollup/rollup-darwin-x64": "4.60.0", + "@rollup/rollup-freebsd-arm64": "4.60.0", + "@rollup/rollup-freebsd-x64": "4.60.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.60.0", + "@rollup/rollup-linux-arm-musleabihf": "4.60.0", + "@rollup/rollup-linux-arm64-gnu": "4.60.0", + "@rollup/rollup-linux-arm64-musl": "4.60.0", + "@rollup/rollup-linux-loong64-gnu": "4.60.0", + "@rollup/rollup-linux-loong64-musl": "4.60.0", + "@rollup/rollup-linux-ppc64-gnu": "4.60.0", + "@rollup/rollup-linux-ppc64-musl": "4.60.0", + "@rollup/rollup-linux-riscv64-gnu": "4.60.0", + "@rollup/rollup-linux-riscv64-musl": "4.60.0", + "@rollup/rollup-linux-s390x-gnu": "4.60.0", + "@rollup/rollup-linux-x64-gnu": "4.60.0", + "@rollup/rollup-linux-x64-musl": "4.60.0", + "@rollup/rollup-openbsd-x64": "4.60.0", + "@rollup/rollup-openharmony-arm64": "4.60.0", + "@rollup/rollup-win32-arm64-msvc": "4.60.0", + "@rollup/rollup-win32-ia32-msvc": "4.60.0", + "@rollup/rollup-win32-x64-gnu": "4.60.0", + "@rollup/rollup-win32-x64-msvc": "4.60.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/scheduler": { + "version": "0.23.2", + 
"resolved": "https://registry.npmmirror.com/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmmirror.com/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sucrase": { + "version": "3.35.1", + "resolved": "https://registry.npmmirror.com/sucrase/-/sucrase-3.35.1.tgz", + "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "tinyglobby": "^0.2.11", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.19", + "resolved": "https://registry.npmmirror.com/tailwindcss/-/tailwindcss-3.4.19.tgz", + "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.7", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmmirror.com/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmmirror.com/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": 
"sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmmirror.com/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmmirror.com/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.4", + "resolved": "https://registry.npmmirror.com/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmmirror.com/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/typescript": { + "version": "5.6.3", + "resolved": "https://registry.npmmirror.com/typescript/-/typescript-5.6.3.tgz", + "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmmirror.com/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": 
"https://registry.npmmirror.com/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "5.4.21", + "resolved": "https://registry.npmmirror.com/vite/-/vite-5.4.21.tgz", + "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + } + } +} diff --git a/web/analysis-ui/package.json b/web/analysis-ui/package.json new file mode 100644 index 0000000..d34edcb --- /dev/null +++ b/web/analysis-ui/package.json @@ -0,0 +1,26 @@ +{ + "name": "ogscope-analysis-lab", + "private": true, + "version": "0.1.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + }, + "dependencies": { + "lucide-react": "^0.460.0", + "react": "^18.3.1", + "react-dom": "^18.3.1" + }, + "devDependencies": { + "@types/react": "^18.3.12", + "@types/react-dom": "^18.3.1", + "@vitejs/plugin-react": "^4.3.3", + "autoprefixer": "^10.4.20", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.15", + "typescript": "~5.6.2", + "vite": "^5.4.10" + } +} diff --git a/web/analysis-ui/postcss.config.js b/web/analysis-ui/postcss.config.js new file mode 100644 index 0000000..2aa7205 --- /dev/null +++ b/web/analysis-ui/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +}; diff --git a/web/analysis-ui/src/App.tsx b/web/analysis-ui/src/App.tsx new file mode 100644 index 0000000..1cd2470 --- /dev/null +++ b/web/analysis-ui/src/App.tsx @@ -0,0 +1,2198 @@ +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; +import { + Database, + FlaskConical, + Grid3x3, + History, + Home, + Loader2, + RefreshCw, + RotateCcw, + Trash2, + Upload, + ZoomIn, + ZoomOut, + ChevronDown, +} from "lucide-react"; +import { + BatchRun, + debugCaptureFileUrl, + deleteExperimentRecord, + deletePoolUpload, + experimentAssetUrl, + exportExperiments, + fetchDebugFileInfo, + fetchDebugFiles, + fetchExperiments, + fetchPresets, + fetchSystemInfo, + fetchUploadExperimentCount, + fetchUploadFileInfo, + fetchUploads, + importFromDebug, + saveExperiment, + saveUserPreset, + solveBatch, + solveImage, + solveVideoFrame, + uploadFile, + uploadFileUrl, + type 
DebugFileRow, + type SolveParams, + type UploadFileRow, +} from "./api"; +import { + drawSolveOverlay, + drawSolveOverlayVideo, + type SolveOverlay, +} from "./drawOverlay"; +import { useI18n } from "./i18n/I18nProvider"; +import { buildMetaCaptionRows } from "./utils/metaCaption"; +import { formatDateTime, formatFileSize } from "./utils/format"; +import { + formatAngleDeg, + formatProbLine, + parseSolveResult, +} from "./utils/solveDisplay"; + +type View = "lab_image" | "lab_video" | "pool" | "history"; + +const defaultParams = (): SolveParams => ({ + hint_ra_deg: 45, + hint_dec_deg: 80, + fov_estimate: 11, + fov_max_error: undefined, + solve_timeout_ms: 1500, + solve_profile: "balanced", + max_image_side: 1600, + large_scale_bg_subtract: false, + detail_level: "summary", + centroid: { + sigma: 2.5, + max_area: 400, + min_area: 5, + filtsize: 25, + binary_open: true, + max_axis_ratio: undefined, + }, +}); + +function metricsFromResult(result: Record<string, unknown> | null): Record<string, unknown> { + if (!result) return {}; + return { + matches: result.matches, + rmse_arcsec: result.rmse_arcsec, + status: result.status, + prob: result.prob, + t_solve_ms: result.t_solve_ms, + }; +} + +function countStarsFromOverlay( + result: Record<string, unknown> | null, +): number | null { + if (!result) return null; + const ov = result.solve_overlay as SolveOverlay | undefined; + if (ov?.stars_matched?.length) return ov.stars_matched.length; + if (ov?.stars_all_centroids?.length) return ov.stars_all_centroids.length; + if (typeof result.matches === "number") return result.matches; + return null; +} + +const HISTORY_PAGE_SIZE = 30; +/** 调试控制台素材列表每页条数 / Items per page for debug media list */ +const DEBUG_PAGE_SIZE = 6; + +function isImageAsset(name: string): boolean { + return /\.(jpe?g|png|webp|bmp|gif|fits?)$/i.test(name); +} +function isVideoAsset(name: string): boolean { + return /\.(mp4|mov|webm|mkv|avi)$/i.test(name); +} + +export default function App() { + const { t, locale, setLocale } = useI18n(); + const [view, setView] = useState<View>("lab_image"); + const [uploads, setUploads] = useState<UploadFileRow[]>([]); + const [selected, setSelected] = useState<string | null>(null); + const [params, setParams] = useState(defaultParams); + const [official, setOfficial] = useState< + Array<{ id: string; name: string; params: SolveParams }> + >([]); + const [userPresets, setUserPresets] = useState< + Array<{ id: string; name: string; params: SolveParams }> + >([]); + const [selectedPresetIds, setSelectedPresetIds] = useState<Set<string>>(new Set()); + const [busy, setBusy] = useState(false); + const [err, setErr] = useState<string | null>(null); + const [lastResult, setLastResult] = useState<Record<string, unknown> | null>(null); + const [batchPack, setBatchPack] = useState<{ + results: Array<Record<string, unknown>>; + } | null>(null); + const [historyQ, setHistoryQ] = useState(""); + const [historyPage, setHistoryPage] = useState(1); + const [historyData, setHistoryData] = useState<{ items: unknown[]; total: number } | null>( + null, + ); + const [historyExpandId, setHistoryExpandId] = useState<string | null>(null); + const [newPresetName, setNewPresetName] = useState(""); + const [layers, setLayers] = useState({ matched: true, pattern: true, all: true }); + const [debugFiles, setDebugFiles] = useState<DebugFileRow[]>([]); + const [debugPick, setDebugPick] = useState<string | null>(null); + const [debugPage, setDebugPage] = useState(1); + const [gridOn, setGridOn] = useState(false); + const [zoom, setZoom] = useState(1); + const [imgNatural, setImgNatural] = useState({ w: 0, h: 0 }); + /** 视频文件素材的像素尺寸 / Video file intrinsic size */ + const [videoNatural, setVideoNatural] = useState({ w: 0, h: 0 }); + /**
相机预览 JPEG 尺寸 / Live camera preview image size */ + const [cameraPreviewNatural, setCameraPreviewNatural] = useState({ w: 0, h: 0 }); + /** 视频台：文件预览或设备相机 / Video lab: pool file vs device camera */ + const [videoPreviewMode, setVideoPreviewMode] = useState<"file" | "camera">("file"); + /** 设备相机预览图 blob URL（与 X-Frame-Id 去重）/ Camera preview blob URL, deduped by frame id */ + const [cameraPreviewUrl, setCameraPreviewUrl] = useState<string | null>(null); + const [videoPreviewError, setVideoPreviewError] = useState<string | null>(null); + const [cameraSolveRunning, setCameraSolveRunning] = useState(false); + const [meta, setMeta] = useState<Record<string, unknown> | null>(null); + const [metaLoading, setMetaLoading] = useState(false); + const [batchRawOpen, setBatchRawOpen] = useState<Record<string, boolean>>({}); + const [singleFooterRawOpen, setSingleFooterRawOpen] = useState(false); + /** 最近一次请求全链路耗时（含网络与渲染）/ Last request round-trip (network + render) */ + const [lastRoundTripMs, setLastRoundTripMs] = useState<number | null>(null); + /** 最近一次解算输入来源 / Last solve input source */ + const [lastSolveSource, setLastSolveSource] = useState<"file" | "camera" | null>(null); + + const imgRef = useRef<HTMLImageElement | null>(null); + const videoRef = useRef<HTMLVideoElement | null>(null); + const cameraPreviewImgRef = useRef<HTMLImageElement | null>(null); + const lastCameraFrameIdRef = useRef<string | null>(null); + const cameraSolveTimerRef = useRef<number | null>(null); + const cameraSolveInFlightRef = useRef(false); + const cvRef = useRef<HTMLCanvasElement | null>(null); + const [sysOverview, setSysOverview] = useState(null); + + const loadLists = useCallback(async () => { + const [u, o, usr] = await Promise.all([ + fetchUploads(), + fetchPresets("official"), + fetchPresets("user"), + ]); + setUploads(u.files); + setOfficial(o.presets as typeof official); + setUserPresets(usr.presets as typeof userPresets); + }, []); + + const loadDebugFiles = useCallback(() => { + fetchDebugFiles() + .then((r) => setDebugFiles(r.files)) + .catch(() => setDebugFiles([])); + }, []); + + useEffect(() => { + loadLists().catch((e) => setErr(String(e))); + }, [loadLists]); + + useEffect(() => { + loadDebugFiles(); + }, [loadDebugFiles]); + + useEffect(() => { + if (view !== "history") return; + fetchExperiments(historyQ, historyPage, HISTORY_PAGE_SIZE) + .then(setHistoryData) + .catch((e) => setErr(String(e))); + }, [view, historyQ, historyPage]); + + useEffect(() => { + if (!selected) { + setMeta(null); + return; + } + let cancelled = false; + setMetaLoading(true); + (async () => { + try { + const u = await fetchUploadFileInfo(selected); + if (cancelled) return; + let merged: Record<string, unknown> = { ...u }; + try { + const d = await fetchDebugFileInfo(selected); + merged = { ...d, ...u }; + } catch { + /* 仅上传素材时调试目录无同名文件 / No debug file */ + } + setMeta(merged); + } catch { + try { + const d = await fetchDebugFileInfo(selected); + if (!cancelled) setMeta(d); + } catch { + if (!cancelled) setMeta(null); + } + } finally { + if (!cancelled) setMetaLoading(false); + } + })(); + return () => { + cancelled = true; + }; + }, [selected]); + + /** 切换素材时清空上一张的解算结果，避免误读 / Clear solve state when switching assets */ + useEffect(() => { + setLastResult(null); + setBatchPack(null); + setBatchRawOpen({}); + setSingleFooterRawOpen(false); + setLastRoundTripMs(null); + setLastSolveSource(null); + setVideoPreviewMode("file"); + setVideoPreviewError(null); + }, [selected]); + + const overlay = useMemo(() => { + const r = lastResult?.result as Record<string, unknown> | undefined; + if (!r) return null; + return (r.solve_overlay || null) as SolveOverlay | null; + }, [lastResult]); + + const resultRow = useMemo(() => { + return (lastResult?.result as Record<string, unknown> | undefined) ??
null; + }, [lastResult]); + + const starCount = useMemo(() => countStarsFromOverlay(resultRow), [resultRow]); + + /** 主预览区像素尺寸(图片 / 视频文件 / 相机 JPEG)/ Pixel size for metrics panel */ + const previewPixelDims = useMemo(() => { + if (view === "lab_image") return imgNatural; + if (view === "lab_video") { + return videoPreviewMode === "file" ? videoNatural : cameraPreviewNatural; + } + return { w: 0, h: 0 }; + }, [view, videoPreviewMode, imgNatural, videoNatural, cameraPreviewNatural]); + + const previewUrl = selected ? uploadFileUrl(selected) : ""; + + useEffect(() => { + if (view !== "lab_image") return; + const img = imgRef.current; + const cv = cvRef.current; + if (!img || !cv || !overlay || !selected) return; + const draw = () => drawSolveOverlay(cv, img, overlay, layers); + if (img.complete) draw(); + else img.onload = draw; + }, [overlay, selected, lastResult, layers, view]); + + /** 视频文件:解算叠加 / Video file: solve overlay on current frame */ + useEffect(() => { + if (view !== "lab_video" || videoPreviewMode !== "file") return; + const v = videoRef.current; + const cv = cvRef.current; + if (!v || !cv || !overlay || !selected) return; + const draw = () => drawSolveOverlayVideo(cv, v, overlay, layers); + v.addEventListener("loadeddata", draw); + v.addEventListener("seeked", draw); + if (v.readyState >= 2) draw(); + return () => { + v.removeEventListener("loadeddata", draw); + v.removeEventListener("seeked", draw); + }; + }, [overlay, layers, view, videoPreviewMode, selected, lastResult]); + + /** 设备相机预览:解算叠加在 JPEG 上 / Live camera JPEG + overlay */ + useEffect(() => { + if (view !== "lab_video" || videoPreviewMode !== "camera") return; + const img = cameraPreviewImgRef.current; + const cv = cvRef.current; + if (!img || !cv || !overlay) return; + const draw = () => drawSolveOverlay(cv, img, overlay, layers); + if (img.complete) draw(); + else img.onload = draw; + }, [overlay, layers, view, videoPreviewMode, lastResult, cameraPreviewUrl]); + + /** 共享预览缓存轮询:仅当 X-Frame-Id 变化时更新图像,减少解码与重绘 / Poll shared cache; update img only on new frame id */ + useEffect(() => { + if (view !== "lab_video" || videoPreviewMode !== "camera") return; + let cancelled = false; + const poll = async () => { + if (cancelled) return; + try { + const qs = lastCameraFrameIdRef.current + ? 
`?since_frame_id=${encodeURIComponent(lastCameraFrameIdRef.current)}` + : ""; + const r = await fetch(`/api/camera/preview${qs}`, { cache: "no-store" }); + if (r.status === 304) return; + if (!r.ok) return; + const fid = r.headers.get("X-Frame-Id"); + if (fid != null) lastCameraFrameIdRef.current = fid; + const blob = await r.blob(); + const url = URL.createObjectURL(blob); + setCameraPreviewUrl((prev) => { + if (prev) URL.revokeObjectURL(prev); + return url; + }); + } catch { + /* 忽略单次失败 / Ignore transient errors */ + } + }; + void poll(); + const id = window.setInterval(() => void poll(), 180); + return () => { + cancelled = true; + clearInterval(id); + setCameraPreviewUrl((prev) => { + if (prev) URL.revokeObjectURL(prev); + return null; + }); + lastCameraFrameIdRef.current = null; + }; + }, [view, videoPreviewMode]); + + useEffect(() => { + const img = imgRef.current; + if (!img) return; + const upd = () => + setImgNatural({ w: img.naturalWidth || 0, h: img.naturalHeight || 0 }); + upd(); + img.addEventListener("load", upd); + return () => img.removeEventListener("load", upd); + }, [selected, previewUrl]); + + useEffect(() => { + const v = videoRef.current; + if (!v) return; + const upd = () => + setVideoNatural({ w: v.videoWidth || 0, h: v.videoHeight || 0 }); + v.addEventListener("loadedmetadata", upd); + v.addEventListener("loadeddata", upd); + upd(); + return () => { + v.removeEventListener("loadedmetadata", upd); + v.removeEventListener("loadeddata", upd); + }; + }, [selected, previewUrl, view]); + + const applyPreset = (p: SolveParams) => { + setParams({ + ...defaultParams(), + ...p, + centroid: { ...defaultParams().centroid, ...p.centroid }, + }); + }; + + const onSingleSolve = async () => { + if (!selected) { + setErr(t("err.selectFile")); + return; + } + setErr(null); + setBusy(true); + const t0 = performance.now(); + try { + const out = (await solveImage(selected, params)) as { result?: Record<string, unknown> }; + setLastResult(out as Record<string, unknown>); + setBatchPack(null); + setLastSolveSource("file"); + setLastRoundTripMs(performance.now() - t0); + } catch (e) { + setErr(String(e)); + setLastRoundTripMs(null); + } finally { + setBusy(false); + } + }; + + const onBatch = async () => { + if (!selected) { + setErr(t("err.selectFile")); + return; + } + const runs: BatchRun[] = []; + for (const id of selectedPresetIds) { + const all = [...official, ...userPresets]; + const pr = all.find((x) => x.id === id); + if (pr) + runs.push({ + label: pr.name, + params: structuredClone(pr.params) as SolveParams, + }); + } + if (runs.length === 0) { + setErr(t("err.selectPresets")); + return; + } + setErr(null); + setBusy(true); + const t0 = performance.now(); + try { + const pack = (await solveBatch(selected, runs)) as { results: unknown[] }; + setBatchPack({ + results: pack.results as Array<Record<string, unknown>>, + }); + setLastResult(null); + setBatchRawOpen({}); + setLastSolveSource("file"); + setLastRoundTripMs(performance.now() - t0); + } catch (e) { + setErr(String(e)); + setLastRoundTripMs(null); + } finally { + setBusy(false); + } + }; + + /** 设备相机当前帧解算（与素材池视频无关）/ Live camera frame solve */ + const onCameraSolve = async () => { + if (cameraSolveInFlightRef.current) return; + setErr(null); + setBusy(true); + cameraSolveInFlightRef.current = true; + const t0 = performance.now(); + try { + const out = await solveVideoFrame({ + source: "camera", + ...params, + }); + setLastResult(out as Record<string, unknown>); + setBatchPack(null); + setLastSolveSource("camera"); + setVideoPreviewMode("camera"); + setLastRoundTripMs(performance.now() - t0); + }
catch (e) { + setErr(String(e)); + setLastRoundTripMs(null); + } finally { + cameraSolveInFlightRef.current = false; + setBusy(false); + } + }; + + const onVideoFileSolve = async () => { + if (!selected) return; + setErr(null); + setBusy(true); + const t0 = performance.now(); + try { + const vd = videoRef.current; + const out = await solveVideoFrame({ + source: "file", + input_name: selected, + time_sec: vd?.currentTime ?? 0, + ...params, + }); + setLastResult(out as Record<string, unknown>); + setBatchPack(null); + setLastSolveSource("file"); + setLastRoundTripMs(performance.now() - t0); + } catch (e) { + setErr(String(e)); + setLastRoundTripMs(null); + } finally { + setBusy(false); + } + }; + + const startCameraSolveLoop = () => { + if (cameraSolveRunning) return; + setCameraSolveRunning(true); + void onCameraSolve(); + cameraSolveTimerRef.current = window.setInterval(() => { + void onCameraSolve(); + }, 1200); + }; + + const stopCameraSolveLoop = () => { + setCameraSolveRunning(false); + if (cameraSolveTimerRef.current != null) { + window.clearInterval(cameraSolveTimerRef.current); + cameraSolveTimerRef.current = null; + } + }; + + const togglePreset = (id: string) => { + setSelectedPresetIds((prev) => { + const n = new Set(prev); + if (n.has(id)) n.delete(id); + else n.add(id); + return n; + }); + }; + + const tryDeleteUpload = async (filename: string) => { + if (!window.confirm(t("delete.uploadFirst", { name: filename }))) return; + let nexp = 0; + try { + const c = await fetchUploadExperimentCount(filename); + nexp = c.count; + } catch { + nexp = 0; + } + if (nexp > 0) { + const ok = window.confirm(t("delete.uploadCascade", { n: nexp })); + if (!ok) return; + } else if (!window.confirm(t("delete.uploadSecond"))) { + return; + } + setBusy(true); + setErr(null); + try { + await deletePoolUpload(filename, { deleteExperiments: nexp > 0 }); + await loadLists(); + if (selected === filename) setSelected(null); + } catch (e) { + setErr(String(e)); + } finally { + setBusy(false); + } + }; + + const tryDeleteExperiment = async (id: string) => { + if (!window.confirm(t("delete.experimentFirst"))) return; + if (!window.confirm(t("delete.experimentSecond"))) return; + setBusy(true); + setErr(null); + try { + await deleteExperimentRecord(id); + if (historyExpandId === id) setHistoryExpandId(null); + const data = await fetchExperiments(historyQ, historyPage, HISTORY_PAGE_SIZE); + setHistoryData(data); + } catch (e) { + setErr(String(e)); + } finally { + setBusy(false); + } + }; + + const setZoomClamped = (z: number) => setZoom(Math.min(4, Math.max(0.5, z))); + + const historyTotalPages = Math.max( + 1, + Math.ceil((historyData?.total ??
0) / HISTORY_PAGE_SIZE), + ); + + /** 按当前页面只列出对应类型调试素材 / Filter debug captures by lab view */ + const debugFilesForView = useMemo(() => { + if (view === "lab_image") { + return debugFiles.filter((f) => f.type === "image" || isImageAsset(f.name)); + } + if (view === "lab_video") { + return debugFiles.filter((f) => f.type === "video" || isVideoAsset(f.name)); + } + return debugFiles; + }, [debugFiles, view]); + + const debugTotalPages = Math.max( + 1, + Math.ceil(debugFilesForView.length / DEBUG_PAGE_SIZE), + ); + const debugPagedFiles = useMemo(() => { + const start = (debugPage - 1) * DEBUG_PAGE_SIZE; + return debugFilesForView.slice(start, start + DEBUG_PAGE_SIZE); + }, [debugFilesForView, debugPage]); + + useEffect(() => { + setDebugPage(1); + }, [view]); + + useEffect(() => { + setDebugPage((p) => Math.min(p, debugTotalPages)); + }, [debugTotalPages]); + + useEffect(() => { + if (debugPick && !debugFilesForView.some((f) => f.name === debugPick)) { + setDebugPick(null); + } + }, [debugFilesForView, debugPick]); + + const metaCaptionRows = useMemo( + () => buildMetaCaptionRows(meta, locale), + [meta, locale], + ); + + const sidebarUploads = useMemo(() => { + if (view === "lab_image") return uploads.filter((u) => isImageAsset(u.filename)); + if (view === "lab_video") return uploads.filter((u) => isVideoAsset(u.filename)); + return uploads; + }, [uploads, view]); + + const solveHud = useMemo(() => parseSolveResult(resultRow), [resultRow]); + + useEffect(() => { + setSingleFooterRawOpen(false); + }, [lastResult]); + useEffect(() => { + if (view !== "lab_video") return; + let id: ReturnType | undefined; + const tick = () => { + fetchSystemInfo().then(setSysOverview).catch(() => {}); + }; + tick(); + id = setInterval(tick, 1500); + return () => { + if (id) clearInterval(id); + }; + }, [view]); + + useEffect(() => { + if (view !== "lab_video" || videoPreviewMode !== "camera") { + stopCameraSolveLoop(); + } + }, [view, videoPreviewMode]); + + useEffect(() => { + return () => { + stopCameraSolveLoop(); + }; + }, []); + + + return ( +
+
+
+ + {t("app.title")} + + +
+
+
+ + +
+ + {t("nav.cameraDebug")} + + + {t("nav.home")} + +
+
+ +
+ + + {(view === "lab_image" || view === "lab_video") && ( +
+
+ {err && ( +
+ {err} +
+ )} + {view === "lab_video" && ( +
+

{t("lab.videoLiveIntro")}

+
+ + +
+
+ )} +
+ {view === "lab_video" && videoPreviewMode === "camera" ? ( +
+
+ {cameraPreviewUrl ? ( + + setCameraPreviewNatural({ + w: e.currentTarget.naturalWidth, + h: e.currentTarget.naturalHeight, + }) + } + /> + ) : ( +
+ + {t("lab.cameraPreviewLoading")} +
+ )} + +
+
+ ) : selected ? ( +
+ {view === "lab_video" ? ( + <> +
+ + + + +
+
+
+
+ {gridOn && ( +
+ )} +
+
+ {videoPreviewError && ( +
+ {videoPreviewError} +
+ )} +
+ + ) : ( + <> +
+ + + + +
+
+
+ {gridOn && ( +
+ )} + preview + +
+
+ + )} +
+ ) : ( +
+ {view === "lab_video" ? t("lab.selectOrUploadVideo") : t("lab.selectOrUpload")} +
+ )} + {busy && ( +
+ +
+ )} +
+ {((selected && view === "lab_image") || + (view === "lab_video" && + ((videoPreviewMode === "file" && selected) || videoPreviewMode === "camera"))) && ( +
+ + {t("lab.layers")} + +
+ {(["matched", "pattern", "all"] as const).map((k) => ( + + ))} +
+
+ )} + {(selected || (view === "lab_video" && videoPreviewMode === "camera")) && ( + <> +
+
+ + {t("lab.solveSection")} + + +
+ {resultRow ? ( +
+ {solveHud.tSolveMs != null && ( +
+
+ + {t("lab.metric.solveComputeMs")} + + + {solveHud.tSolveMs.toFixed(0)} ms + +
+

+ {t("lab.metric.solveComputeHelp")} +

+
+ )} + {lastRoundTripMs != null && ( +
+
+ + {t("lab.metric.solveRoundTripMs")} + + + {lastRoundTripMs.toFixed(0)} ms + +
+

+ {t("lab.metric.solveRoundTripHelp")} +

+
+ )} + {(solveHud.tBackendTotalMs != null || + solveHud.tOpenDecodeMs != null || + solveHud.tPreprocessMs != null || + solveHud.tExtractMs != null || + solveHud.tSolveMs != null) && ( +
+
+ + {t("lab.metric.backendTotalMs")} + + + {solveHud.tBackendTotalMs != null + ? `${solveHud.tBackendTotalMs.toFixed(0)} ms` + : t("common.placeholder")} + +
+
+ + {t("lab.metric.openDecodeMs")}:{" "} + {solveHud.tOpenDecodeMs != null + ? `${solveHud.tOpenDecodeMs.toFixed(0)} ms` + : t("common.placeholder")} + + + {t("lab.metric.preprocessMs")}:{" "} + {solveHud.tPreprocessMs != null + ? `${solveHud.tPreprocessMs.toFixed(0)} ms` + : t("common.placeholder")} + + + {t("lab.metric.extractMs")}:{" "} + {solveHud.tExtractMs != null + ? `${solveHud.tExtractMs.toFixed(0)} ms` + : t("common.placeholder")} + + + {t("lab.metric.solveOnlyMs")}:{" "} + {solveHud.tSolveMs != null + ? `${solveHud.tSolveMs.toFixed(0)} ms` + : t("common.placeholder")} + +
+
+ )} + {(solveHud.raDeg != null || solveHud.decDeg != null) && ( +
+ + {t("lab.metric.radec")} + +
+ α {formatAngleDeg(solveHud.raDeg)} · δ{" "} + {formatAngleDeg(solveHud.decDeg)} +
+
+ )} +
+ {solveHud.matches != null && ( + + + {t("lab.metric.matches")} + {" "} + {solveHud.matches} + + )} + {solveHud.rmseArcsec != null && ( + + + {t("lab.metric.rmse")} + {" "} + + {solveHud.rmseArcsec.toFixed(2)}″ + + + )} + {solveHud.prob != null && ( + + + + {t("lab.metric.prob")} + {" "} + + {formatProbLine(solveHud.prob, resultRow ?? undefined).line} + + + + {t("lab.metric.probHelp")} + + {formatProbLine(solveHud.prob, resultRow ?? undefined).rawLine && ( + <> + + {t("lab.metric.probRaw")}:{" "} + { + formatProbLine(solveHud.prob, resultRow ?? undefined) + .rawLine + } + + + {t("lab.metric.probRawHelp")} + + + )} + + )} +
+ {solveHud.status && ( +
+ + {t("lab.metric.status")}{" "} + + {solveHud.status} +
+ )} +
+ ) : ( +

{t("common.placeholder")}

+ )} +
+
+
+ + {t("lab.imageSection")} + + +
+
+ {t("lab.resolution")} + + {previewPixelDims.w > 0 + ? `${previewPixelDims.w}×${previewPixelDims.h}` + : t("common.placeholder")} + +
+
+ {t("lab.starsDetected")} + + {starCount != null ? starCount : t("common.placeholder")} + +
+
+ {t("lab.fwhm")} + {t("common.placeholder")} +
+
+
+
+ {selected && ( + <> +
+ + {t("lab.file")}:{" "} + {selected} + + {uploads.find((u) => u.filename === selected)?.source && ( + + {t("lab.source")}:{" "} + {uploads.find((u) => u.filename === selected)?.source} + + )} +
+
+ + {t("lab.meta.title")} + {metaLoading && } + + +
+ {metaCaptionRows.length > 0 ? ( +
+ {metaCaptionRows.map((row) => ( +
+
{t(row.key)}
+
+ {row.value} +
+
+ ))} +
+ ) : meta && !metaLoading ? ( +

+ {t("lab.meta.partial")} +

+ ) : !metaLoading ? ( +

+ {t("lab.meta.noSidecar")} +

+ ) : null} +
+
+ + )} + + )} + {(selected || (view === "lab_video" && videoPreviewMode === "camera")) && + (lastResult || batchPack) && ( +
+
+ {t("results.title")} +
+ {batchPack?.results?.length ? ( + + ) : null} + {lastResult && ( + + )} +
+
+
+
+ {batchPack?.results.map((r, i) => { + const row = r.result as Record | undefined; + const rawOpen = batchRawOpen[i] ?? false; + return ( +
+
+ {String(r.label)} +
+ {r.success ? ( + <> + + + {rawOpen && ( +
+                                    {JSON.stringify(r, null, 2)}
+                                  
+ )} + + ) : ( +
{String(r.error)}
+ )} + {r.success && selected && ( + + )} +
+ ); + })} + {lastResult && !batchPack && ( +
+
+ {t("results.title")} +
+ | undefined} + t={t} + roundTripMs={lastRoundTripMs} + /> + + {singleFooterRawOpen && ( +
+                              {JSON.stringify(lastResult, null, 2)}
+                            
+ )} +
+ )} +
+
+
+ )} +
+ + +
+ )} + + {view === "pool" && ( +
+

+ {t("pool.title")} +

+ + + + + + + + + + + + {uploads.map((u) => ( + + + + + + + + ))} + +
{t("pool.col.name")}{t("pool.col.source")}{t("pool.col.size")}{t("pool.col.time")}{t("pool.delete")}
{u.filename}{u.source ?? t("common.placeholder")}{formatFileSize(u.size)}{formatDateTime(u.modified_at, locale)} + +
+
+ )} + + {view === "history" && ( +
+

+ {t("history.intro")} +

+
+

+ {t("history.title")} +

+ setHistoryQ(e.target.value)} + /> + + + +
+
+ {t("history.total", { n: historyData?.total ?? 0 })} + + + {historyPage} / {historyTotalPages} + + +
+
    + {(historyData?.items as Array>)?.map((row) => { + const id = String(row.id ?? ""); + const metrics = row.metrics as Record | undefined; + const open = historyExpandId === id; + return ( +
  • +
    +
    +
    + {formatDateTime(String(row.created_at ?? ""), locale)} —{" "} + {String(row.input_name)} — {String(row.preset_label)} +
    +
    + {t("history.preset")}: {String(row.preset_label)} · {t("history.metrics")}:{" "} + matches={String(metrics?.matches ?? "—")} rmse= + {String(metrics?.rmse_arcsec ?? "—")} +
    +
    +
    + + +
    +
    + {open && ( +
    + {row.asset_snapshot_relpath ? ( + + ) : null} +
    +                          {JSON.stringify(row.result_json, null, 2)}
    +                        
    +
    + )} +
  • + ); + })} +
+
+ )} +
+
+ ); +} + +function SolveFooterSummary({ + result, + t, + roundTripMs, +}: { + result: Record<string, unknown> | null | undefined; + t: (key: string, vars?: Record<string, string | number>) => string; + roundTripMs: number | null; +}) { + const s = parseSolveResult(result ?? undefined); + if (!result) { + return

; + } + return ( +
+
+ {s.tBackendTotalMs != null && ( +
+
{t("lab.metric.backendTotalMs")}
+
+ {s.tBackendTotalMs.toFixed(0)} ms +
+
+ )} + {s.tSolveMs != null && ( +
+
{t("lab.metric.solveComputeMs")}
+
+ {s.tSolveMs.toFixed(0)} ms +
+
+ )} + {s.tOpenDecodeMs != null && ( +
+
{t("lab.metric.openDecodeMs")}
+
+ {s.tOpenDecodeMs.toFixed(0)} ms +
+
+ )} + {s.tPreprocessMs != null && ( +
+
{t("lab.metric.preprocessMs")}
+
+ {s.tPreprocessMs.toFixed(0)} ms +
+
+ )} + {s.tExtractMs != null && ( +
+
{t("lab.metric.extractMs")}
+
+ {s.tExtractMs.toFixed(0)} ms +
+
+ )} + {roundTripMs != null && ( +
+
{t("lab.metric.solveRoundTripMs")}
+
+ {roundTripMs.toFixed(0)} ms +
+
+ )} + {s.matches != null && ( +
+
{t("lab.metric.matches")}
+
{s.matches}
+
+ )} + {s.rmseArcsec != null && ( +
+
{t("lab.metric.rmse")}
+
+ {s.rmseArcsec.toFixed(2)}″ +
+
+ )} + {s.prob != null && ( +
+
{t("lab.metric.prob")}
+
+ {formatProbLine(s.prob, result).line} +
+

+ {t("lab.metric.probHelp")} +

+ {formatProbLine(s.prob, result).rawLine && ( +
+ {t("lab.metric.probRaw")}: {formatProbLine(s.prob, result).rawLine} +

+ {t("lab.metric.probRawHelp")} +

+
+ )} +
+ )} +
+
+
{t("lab.metric.radec")}
+
+ α {formatAngleDeg(s.raDeg)} · δ {formatAngleDeg(s.decDeg)} +
+
+ {s.status && ( +
+ {t("lab.metric.status")}: {s.status} +
+ )} +
+ ); +} + +function Field({ + label, + helpKey, + value, + onChange, + type = "number", + step, +}: { + label: string; + helpKey?: string; + value: number | ""; + onChange: (v: number | undefined) => void; + type?: string; + step?: number; +}) { + const { t } = useI18n(); + const help = helpKey ? t(helpKey) : undefined; + return ( + + ); +} diff --git a/web/analysis-ui/src/api.ts b/web/analysis-ui/src/api.ts new file mode 100644 index 0000000..2580c4e --- /dev/null +++ b/web/analysis-ui/src/api.ts @@ -0,0 +1,305 @@ +/** OGScope Analysis Lab API client / 星空解算控制台 API */ + +const API = "/api"; + +export type UploadFileRow = { + filename: string; + size: number; + modified_at: string; + source?: string; + last_solve?: Record; +}; + +export type CentroidParams = { + sigma?: number; + max_area?: number; + min_area?: number; + filtsize?: number; + binary_open?: boolean; + max_axis_ratio?: number; +}; + +export type SolveParams = { + hint_ra_deg?: number | null; + hint_dec_deg?: number | null; + fov_estimate?: number | null; + fov_max_error?: number | null; + solve_timeout_ms?: number | null; + solve_profile?: "speed" | "balanced" | "robust" | null; + centroid?: CentroidParams | null; + max_image_side?: number | null; + /** 提星前大尺度背景减除(角部光晕等)/ Large-scale BG flatten before centroiding */ + large_scale_bg_subtract?: boolean | null; + /** 结果详细程度:summary 仅返回关键字段,full 包含 tetra 原始块 / Result detail level */ + detail_level?: "summary" | "full" | null; +}; + +async function parseJson(resp: Response): Promise { + const ct = resp.headers.get("content-type") || ""; + if (ct.includes("application/json")) { + return resp.json(); + } + const t = await resp.text(); + throw new Error(t || `HTTP ${resp.status}`); +} + +export async function fetchUploads(): Promise<{ files: UploadFileRow[] }> { + const r = await fetch(`${API}/analysis/uploads`); + if (!r.ok) throw new Error(await r.text()); + return r.json() as Promise<{ files: UploadFileRow[] }>; +} + +export async function deletePoolUpload( + filename: string, + options?: { deleteExperiments?: boolean }, +): Promise<{ deleted_experiments?: number }> { + const qs = new URLSearchParams(); + if (options?.deleteExperiments) qs.set("delete_experiments", "true"); + const q = qs.toString(); + const r = await fetch( + `${API}/analysis/uploads/${encodeURIComponent(filename)}${q ? 
`?${q}` : ""}`, + { method: "DELETE" }, + ); + if (!r.ok) throw new Error(await r.text()); + return (await r.json().catch(() => ({}))) as { deleted_experiments?: number }; +} + +export async function uploadFile( + file: File, + source = "analysis_upload" +): Promise<{ filename: string }> { + const fd = new FormData(); + fd.append("file", file); + fd.append("source", source); + const r = await fetch(`${API}/analysis/upload`, { method: "POST", body: fd }); + const data = await parseJson(r); + if (!r.ok) throw new Error(String((data as { detail?: string }).detail || r.status)); + return data as { filename: string }; +} + +export async function importFromDebug(filename: string): Promise<{ filename: string }> { + const r = await fetch(`${API}/analysis/uploads/import_from_debug`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ filename }), + }); + const data = await parseJson(r); + if (!r.ok) throw new Error(String((data as { detail?: string }).detail || r.status)); + return data as { filename: string }; +} + +export async function solveImage( + input_name: string, + params: SolveParams +): Promise { + const r = await fetch(`${API}/analysis/solve/image`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ input_name, ...params }), + }); + const data = await parseJson(r); + if (!r.ok) throw new Error(String((data as { detail?: string }).detail || r.status)); + return data; +} + +export type BatchRun = { label: string; params: SolveParams }; + +export async function solveBatch( + input_name: string, + runs: BatchRun[] +): Promise { + const r = await fetch(`${API}/analysis/solve/batch`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ input_name, runs }), + }); + const data = await parseJson(r); + if (!r.ok) throw new Error(String((data as { detail?: string }).detail || r.status)); + return data; +} + +export async function fetchPresets(scope: "official" | "user"): Promise<{ + presets: Array<{ id: string; name: string; params: SolveParams; scope: string }>; +}> { + const r = await fetch(`${API}/analysis/presets?scope=${scope}`); + if (!r.ok) throw new Error(await r.text()); + return r.json(); +} + +export async function saveUserPreset( + name: string, + params: SolveParams +): Promise { + const r = await fetch(`${API}/analysis/presets`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ name, params }), + }); + const data = await parseJson(r); + if (!r.ok) throw new Error(String((data as { detail?: string }).detail || r.status)); + return data; +} + +export async function deleteUserPreset(id: string): Promise { + const r = await fetch(`${API}/analysis/presets/${encodeURIComponent(id)}`, { + method: "DELETE", + }); + if (!r.ok) throw new Error(await r.text()); +} + +export async function saveExperiment(payload: { + input_name: string; + preset_label: string; + result_json: unknown; + metrics: Record; + thumbnail_png_base64?: string | null; + replay?: Record | null; + save_asset_snapshot?: boolean; +}): Promise { + const r = await fetch(`${API}/analysis/experiments`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload), + }); + const data = await parseJson(r); + if (!r.ok) throw new Error(String((data as { detail?: string }).detail || r.status)); + return data; +} + +export async function deleteExperimentRecord(experimentId: string): Promise { + const r = await fetch( + 
`${API}/analysis/experiments/${encodeURIComponent(experimentId)}`, + { method: "DELETE" }, + ); + if (!r.ok) throw new Error(await r.text()); +} + +export async function fetchExperiments( + q: string, + page: number, + pageSize = 30, +): Promise<{ items: unknown[]; total: number; page_size?: number }> { + const qs = new URLSearchParams({ + page: String(page), + page_size: String(pageSize), + }); + if (q) qs.set("q", q); + const r = await fetch(`${API}/analysis/experiments?${qs}`); + if (!r.ok) throw new Error(await r.text()); + return r.json(); +} + +export type DebugFileRow = { + name: string; + size: number; + modified: string; + type: string; +}; + +export async function fetchDebugFiles(): Promise<{ files: DebugFileRow[] }> { + const r = await fetch(`${API}/debug/files`); + if (!r.ok) throw new Error(await r.text()); + return r.json() as Promise<{ files: DebugFileRow[] }>; +} + +export function debugCaptureFileUrl(filename: string): string { + return `${API}/debug/files/${encodeURIComponent(filename)}`; +} + +export async function fetchDebugFileInfo( + filename: string, +): Promise> { + const r = await fetch( + `${API}/debug/files/${encodeURIComponent(filename)}/info`, + ); + const data = await parseJson(r); + if (!r.ok) throw new Error(String((data as { detail?: string }).detail || r.status)); + return data as Record; +} + +export async function fetchUploadFileInfo( + filename: string, +): Promise> { + const r = await fetch( + `${API}/analysis/uploads/${encodeURIComponent(filename)}/info`, + ); + const data = await parseJson(r); + if (!r.ok) throw new Error(String((data as { detail?: string }).detail || r.status)); + return data as Record; +} + +export function uploadFileUrl(filename: string): string { + return `${API}/analysis/uploads/file?filename=${encodeURIComponent(filename)}`; +} + +export async function exportExperiments(fmt: "json" | "csv"): Promise { + const r = await fetch(`${API}/analysis/experiments/export?format=${fmt}`); + if (!r.ok) throw new Error(await r.text()); + return r.text(); +} + + +export async function fetchUploadExperimentCount( + filename: string, +): Promise<{ count: number }> { + const r = await fetch( + `${API}/analysis/uploads/${encodeURIComponent(filename)}/experiment_count`, + ); + if (!r.ok) throw new Error(await r.text()); + return r.json() as Promise<{ count: number }>; +} + +export type LabPublicSettings = { + solver_timeout_ms: number; + star_analysis_target_fps: number; + camera_width: number; + camera_height: number; + camera_fps: number; + solver_fov_deg: number; + solver_max_image_side: number; + solver_large_scale_bg_downsample?: number; +}; + +export async function fetchLabSettings(): Promise { + const r = await fetch(`${API}/analysis/settings`); + if (!r.ok) throw new Error(await r.text()); + return r.json() as Promise; +} + +export type SolveVideoFrameSource = "camera" | "file"; + +export async function solveVideoFrame(payload: { + source: SolveVideoFrameSource; + input_name?: string | null; + frame_index?: number; + time_sec?: number | null; +} & SolveParams): Promise<{ success: boolean; result?: Record }> { + const r = await fetch(`${API}/analysis/solve/frame`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload), + }); + const data = await parseJson(r); + if (!r.ok) throw new Error(String((data as { detail?: string }).detail || r.status)); + return data as { success: boolean; result?: Record }; +} + +export function experimentAssetUrl(experimentId: string): string { + return 
`${API}/analysis/experiments/${encodeURIComponent(experimentId)}/asset`; +} + +export type SystemInfo = { + platform: string; + os: string; + cpu_usage: number; + memory_usage: number; + temperature: number; + uptime_seconds?: number; + load_average_1m?: number; +}; + +export async function fetchSystemInfo(): Promise { + const r = await fetch(`${API}/system/info`); + if (!r.ok) throw new Error(await r.text()); + return r.json() as Promise; +} diff --git a/web/analysis-ui/src/drawOverlay.ts b/web/analysis-ui/src/drawOverlay.ts new file mode 100644 index 0000000..dc4e39c --- /dev/null +++ b/web/analysis-ui/src/drawOverlay.ts @@ -0,0 +1,94 @@ +/** 解算叠加绘制(与旧 debug-analysis 逻辑一致)/ Solve overlay drawing */ + +export type LayerToggles = { + matched: boolean; + pattern: boolean; + all: boolean; +}; + +export type SolveOverlay = { + stars_all_centroids?: Array<{ x: number; y: number }>; + stars_pattern?: Array<{ x: number; y: number }>; + stars_matched?: Array<{ x: number; y: number; mag?: number }>; +}; + +function drawOverlayCore( + ctx: CanvasRenderingContext2D, + w: number, + h: number, + overlay: SolveOverlay | null | undefined, + layers: LayerToggles, +): void { + if (!overlay) return; + ctx.clearRect(0, 0, w, h); + + if (layers.all && Array.isArray(overlay.stars_all_centroids)) { + ctx.fillStyle = "rgba(156, 163, 175, 0.85)"; + for (const s of overlay.stars_all_centroids) { + ctx.beginPath(); + ctx.arc(s.x, s.y, 2.4, 0, Math.PI * 2); + ctx.fill(); + } + } + if (layers.pattern && Array.isArray(overlay.stars_pattern)) { + ctx.strokeStyle = "rgba(251, 146, 60, 0.95)"; + ctx.lineWidth = 2; + for (const s of overlay.stars_pattern) { + ctx.beginPath(); + ctx.arc(s.x, s.y, 6, 0, Math.PI * 2); + ctx.stroke(); + } + } + if (layers.matched && Array.isArray(overlay.stars_matched)) { + ctx.strokeStyle = "rgba(34, 197, 94, 0.95)"; + ctx.fillStyle = "rgba(34, 197, 94, 0.95)"; + ctx.lineWidth = 2; + ctx.font = "11px system-ui, sans-serif"; + for (const s of overlay.stars_matched) { + ctx.beginPath(); + ctx.arc(s.x, s.y, 7, 0, Math.PI * 2); + ctx.stroke(); + if (s.mag != null) { + ctx.fillText(`m${Number(s.mag).toFixed(1)}`, s.x + 4, s.y - 4); + } + } + } +} + +export function drawSolveOverlay( + canvas: HTMLCanvasElement, + img: HTMLImageElement, + overlay: SolveOverlay | null | undefined, + layers: LayerToggles, +): void { + if (!overlay) return; + const w = img.naturalWidth || 1; + const h = img.naturalHeight || 1; + canvas.width = w; + canvas.height = h; + canvas.style.width = `${img.clientWidth}px`; + canvas.style.height = `${img.clientHeight}px`; + const ctx = canvas.getContext("2d"); + if (!ctx) return; + drawOverlayCore(ctx, w, h, overlay, layers); +} + +/** 视频当前帧上的叠加(坐标与 videoWidth/Height 一致)/ Overlay on video frame pixels */ +export function drawSolveOverlayVideo( + canvas: HTMLCanvasElement, + video: HTMLVideoElement, + overlay: SolveOverlay | null | undefined, + layers: LayerToggles, +): void { + if (!overlay) return; + const w = video.videoWidth || 1; + const h = video.videoHeight || 1; + if (w < 2 || h < 2) return; + canvas.width = w; + canvas.height = h; + canvas.style.width = `${video.clientWidth}px`; + canvas.style.height = `${video.clientHeight}px`; + const ctx = canvas.getContext("2d"); + if (!ctx) return; + drawOverlayCore(ctx, w, h, overlay, layers); +} diff --git a/web/analysis-ui/src/i18n/I18nProvider.tsx b/web/analysis-ui/src/i18n/I18nProvider.tsx new file mode 100644 index 0000000..dc8d0ae --- /dev/null +++ b/web/analysis-ui/src/i18n/I18nProvider.tsx @@ -0,0 +1,61 @@ +import { + 
createContext, + useContext, + useEffect, + useMemo, + useState, + type ReactNode, +} from "react"; +// 打包进 bundle,避免 fetch /static 失败导致显示原始 key / Bundle JSON to avoid fetch failures +import enDict from "@i18n/analysis.en.json"; +import zhDict from "@i18n/analysis.zh.json"; + +export type Locale = "zh" | "en"; + +type Ctx = { + locale: Locale; + setLocale: (l: Locale) => void; + t: (key: string, vars?: Record) => string; +}; + +const I18nContext = createContext(null); + +const BUNDLED: Record> = { + zh: zhDict as Record, + en: enDict as Record, +}; + +export function I18nProvider({ children }: { children: ReactNode }) { + const [locale, setLocale] = useState("zh"); + const [dict, setDict] = useState>(BUNDLED.zh); + + useEffect(() => { + setDict(BUNDLED[locale]); + }, [locale]); + + const t = useMemo( + () => (key: string, vars?: Record) => { + let s = dict[key] ?? key; + if (vars) { + for (const [k, v] of Object.entries(vars)) { + s = s.replace(new RegExp(`\\{${k}\\}`, "g"), String(v)); + } + } + return s; + }, + [dict], + ); + + const value = useMemo( + () => ({ locale, setLocale, t }), + [locale, t], + ); + + return {children}; +} + +export function useI18n(): Ctx { + const c = useContext(I18nContext); + if (!c) throw new Error("useI18n must be used within I18nProvider"); + return c; +} diff --git a/web/analysis-ui/src/index.css b/web/analysis-ui/src/index.css new file mode 100644 index 0000000..c5e4125 --- /dev/null +++ b/web/analysis-ui/src/index.css @@ -0,0 +1,9 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +html, +body, +#root { + height: 100%; +} diff --git a/web/analysis-ui/src/main.tsx b/web/analysis-ui/src/main.tsx new file mode 100644 index 0000000..ff3fff8 --- /dev/null +++ b/web/analysis-ui/src/main.tsx @@ -0,0 +1,13 @@ +import React from "react"; +import ReactDOM from "react-dom/client"; +import App from "./App"; +import { I18nProvider } from "./i18n/I18nProvider"; +import "./index.css"; + +ReactDOM.createRoot(document.getElementById("root")!).render( + + + + + , +); diff --git a/web/analysis-ui/src/utils/format.ts b/web/analysis-ui/src/utils/format.ts new file mode 100644 index 0000000..e5f9f94 --- /dev/null +++ b/web/analysis-ui/src/utils/format.ts @@ -0,0 +1,21 @@ +/** 文件大小与时间展示 / File size and datetime display */ + +export function formatFileSize(n: number | null | undefined): string { + if (n == null || Number.isNaN(n)) return "—"; + if (n < 1024) return `${n} B`; + if (n < 1024 * 1024) return `${(n / 1024).toFixed(1)} KB`; + return `${(n / (1024 * 1024)).toFixed(2)} MB`; +} + +export function formatDateTime(iso: string | null | undefined, locale: string): string { + if (!iso) return "—"; + try { + const d = new Date(iso); + return new Intl.DateTimeFormat(locale === "en" ? "en-GB" : "zh-CN", { + dateStyle: "short", + timeStyle: "medium", + }).format(d); + } catch { + return iso; + } +} diff --git a/web/analysis-ui/src/utils/metaCaption.ts b/web/analysis-ui/src/utils/metaCaption.ts new file mode 100644 index 0000000..512ae82 --- /dev/null +++ b/web/analysis-ui/src/utils/metaCaption.ts @@ -0,0 +1,69 @@ +/** + * 画面角标与下方说明分工:角标只放分辨率/星点/FWHM 与解算摘要; + * 此处放拍摄侧车与文件属性,避免与角标重复。 + * Corner HUD: resolution/stars/FWHM + solve; caption: exposure/gain/file time/size. 
+ */ + +import { formatDateTime, formatFileSize } from "./format"; + +export type MetaCaptionRow = { key: string; value: string }; + +function str(x: unknown): string | null { + if (x == null) return null; + if (typeof x === "string" && x.trim()) return x.trim(); + if (typeof x === "number" && !Number.isNaN(x)) return String(x); + return null; +} + +/** 曝光微秒转可读 / exposure_us to readable */ +function formatExposureUs(us: unknown): string | null { + if (typeof us !== "number" || us <= 0) return null; + if (us >= 1_000_000) return `${(us / 1_000_000).toFixed(2)} s`; + if (us >= 1000) return `${(us / 1000).toFixed(0)} ms`; + return `${us} µs`; +} + +export function buildMetaCaptionRows( + meta: Record | null, + locale: string, +): MetaCaptionRow[] { + if (!meta) return []; + const rows: MetaCaptionRow[] = []; + + const ex = formatExposureUs(meta.exposure_us); + if (ex) rows.push({ key: "meta.exposure", value: ex }); + + const ag = str(meta.analogue_gain); + const dg = str(meta.digital_gain); + if (ag || dg) { + const parts = [ag ? `A ${ag}` : "", dg ? `D ${dg}` : ""].filter(Boolean); + rows.push({ key: "meta.gain", value: parts.join(" · ") }); + } + + const fps = str(meta.fps); + if (fps) rows.push({ key: "meta.fps", value: fps }); + + const sensor = str(meta.sensor); + if (sensor) rows.push({ key: "meta.sensor", value: sensor }); + + const cm = str(meta.color_mode); + if (cm) rows.push({ key: "meta.colorMode", value: cm }); + + const outRes = str(meta.resolution); + if (outRes) rows.push({ key: "meta.outputResolution", value: outRes }); + + const mod = str(meta.modified); + if (mod) { + rows.push({ + key: "meta.fileTime", + value: formatDateTime(mod, locale), + }); + } + + const sz = meta.size; + if (typeof sz === "number" && sz > 0) { + rows.push({ key: "meta.fileSize", value: formatFileSize(sz) }); + } + + return rows; +} diff --git a/web/analysis-ui/src/utils/solveDisplay.ts b/web/analysis-ui/src/utils/solveDisplay.ts new file mode 100644 index 0000000..e814621 --- /dev/null +++ b/web/analysis-ui/src/utils/solveDisplay.ts @@ -0,0 +1,93 @@ +/** 解算结果摘要(与后端 solve 行一致)/ Solve row summary for UI */ + +export type SolveSummary = { + tSolveMs: number | null; + tExtractMs: number | null; + tPreprocessMs: number | null; + tOpenDecodeMs: number | null; + tBackendTotalMs: number | null; + raDeg: number | null; + decDeg: number | null; + matches: number | null; + rmseArcsec: number | null; + prob: number | null; + status: string | null; +}; + +export function parseSolveResult( + r: Record | null | undefined, +): SolveSummary { + const num = (x: unknown): number | null => + typeof x === "number" && !Number.isNaN(x) ? x : null; + if (!r) { + return { + tSolveMs: null, + tExtractMs: null, + tPreprocessMs: null, + tOpenDecodeMs: null, + tBackendTotalMs: null, + raDeg: null, + decDeg: null, + matches: null, + rmseArcsec: null, + prob: null, + status: null, + }; + } + return { + tSolveMs: num(r.t_solve_ms), + tExtractMs: num(r.t_extract_ms), + tPreprocessMs: num(r.t_preprocess_ms), + tOpenDecodeMs: num(r.t_open_decode_ms), + tBackendTotalMs: num(r.t_backend_total_ms), + raDeg: num(r.ra_deg), + decDeg: num(r.dec_deg), + matches: num(r.matches), + rmseArcsec: num(r.rmse_arcsec), + prob: num(r.prob), + status: typeof r.status === "string" ? 
r.status : null, + }; +} + +export function formatAngleDeg(v: number | null): string { + if (v == null) return "—"; + return `${v.toFixed(4)}°`; +} + +/** 置信度 0–1 转百分比 / Confidence to percent string */ +export function formatProb(p: number | null): string { + if (p == null) return "—"; + if (p >= 0 && p <= 1) return `${(p * 100).toFixed(1)}%`; + return String(p); +} + +/** 原始 Tetra3 Prob 字段(可能为对数等)/ Raw Prob from tetra block */ +export function formatTetraProb(raw: unknown): string { + if (raw === null || raw === undefined) return "—"; + if (typeof raw === "number" && !Number.isNaN(raw)) { + const a = Math.abs(raw); + if (a > 0 && a < 1e-3) return raw.toExponential(4); + if (a >= 0 && a <= 1) return `${(raw * 100).toFixed(4)}%`; + return String(raw); + } + return String(raw); +} + +/** 置信度展示:极小值与 0 更易读 / Human-readable confidence line */ +export function formatProbLine( + p: number | null, + result: Record | null | undefined, +): { line: string; rawLine: string | null } { + const tetra = result?.tetra as Record | undefined; + const rawProb = tetra ? (tetra.Prob ?? tetra.prob) : undefined; + let line = formatProb(p); + if (p != null && p >= 0 && p <= 1 && p > 0 && p < 0.0001) { + line = `${(p * 100).toExponential(2)}%`; + } + if (p === 0 && rawProb !== undefined && rawProb !== null) { + line = "—"; + } + const rawLine = + rawProb !== undefined && rawProb !== null ? formatTetraProb(rawProb) : null; + return { line, rawLine }; +} diff --git a/web/analysis-ui/src/vite-env.d.ts b/web/analysis-ui/src/vite-env.d.ts new file mode 100644 index 0000000..11f02fe --- /dev/null +++ b/web/analysis-ui/src/vite-env.d.ts @@ -0,0 +1 @@ +/// diff --git a/web/analysis-ui/tailwind.config.js b/web/analysis-ui/tailwind.config.js new file mode 100644 index 0000000..fc14652 --- /dev/null +++ b/web/analysis-ui/tailwind.config.js @@ -0,0 +1,33 @@ +/** @type {import('tailwindcss').Config} */ +export default { + content: ["./index.html", "./src/**/*.{js,ts,jsx,tsx}"], + darkMode: "class", + theme: { + extend: { + colors: { + surface: "#10131a", + background: "#10131a", + "on-surface": "#e1e2eb", + "on-surface-variant": "#c2c6d6", + primary: "#adc6ff", + "primary-container": "#4c8eff", + "on-primary-container": "#00285d", + secondary: "#40e56c", + outline: "#8c909f", + "outline-variant": "#414754", + "surface-container": "#1d2026", + "surface-container-low": "#191c22", + "surface-container-lowest": "#0b0e14", + "surface-container-high": "#272a31", + "surface-container-highest": "#32353c", + error: "#ffb4ab", + "error-container": "#93000a", + }, + fontFamily: { + headline: ["system-ui", "Segoe UI", "sans-serif"], + body: ["system-ui", "Segoe UI", "sans-serif"], + }, + }, + }, + plugins: [], +}; diff --git a/web/analysis-ui/tsconfig.json b/web/analysis-ui/tsconfig.json new file mode 100644 index 0000000..44374fc --- /dev/null +++ b/web/analysis-ui/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "baseUrl": ".", + "paths": { + "@i18n/*": ["../static/i18n/*"] + }, + "target": "ES2022", + "useDefineForClassFields": true, + "lib": ["ES2022", "DOM", "DOM.Iterable"], + "module": "ESNext", + "skipLibCheck": true, + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx", + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": ["src"] +} diff --git a/web/analysis-ui/tsconfig.node.json b/web/analysis-ui/tsconfig.node.json new file mode 100644 
index 0000000..7366cef --- /dev/null +++ b/web/analysis-ui/tsconfig.node.json @@ -0,0 +1,10 @@ +{ + "compilerOptions": { + "target": "ES2022", + "lib": ["ES2023"], + "module": "ESNext", + "skipLibCheck": true, + "moduleResolution": "bundler" + }, + "include": ["vite.config.ts"] +} diff --git a/web/analysis-ui/vite.config.ts b/web/analysis-ui/vite.config.ts new file mode 100644 index 0000000..6b3d551 --- /dev/null +++ b/web/analysis-ui/vite.config.ts @@ -0,0 +1,31 @@ +import path from "node:path"; +import { fileURLToPath } from "node:url"; + +import { defineConfig } from "vite"; +import react from "@vitejs/plugin-react"; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); + +// 构建产物供 FastAPI 以 /static/analysis-lab/ 挂载 / Build output for FastAPI static mount +export default defineConfig({ + plugins: [react()], + base: "/static/analysis-lab/", + resolve: { + alias: { + // 与 web/static/i18n 共用一份文案 / Single source with FastAPI static + "@i18n": path.resolve(__dirname, "../static/i18n"), + }, + }, + build: { + outDir: "../static/analysis-lab", + emptyOutDir: true, + }, + // 开发时把 /api 转到本机 FastAPI,便于本地联调 / Proxy API to FastAPI during dev + server: { + proxy: { + "/api": "http://127.0.0.1:8000", + // i18n JSON 与 FastAPI /static 一致 / Same as FastAPI static i18n + "/static": "http://127.0.0.1:8000", + }, + }, +}); diff --git a/web/static/analysis-lab/assets/index-CjfavI3Q.js b/web/static/analysis-lab/assets/index-CjfavI3Q.js new file mode 100644 index 0000000..02926e1 --- /dev/null +++ b/web/static/analysis-lab/assets/index-CjfavI3Q.js @@ -0,0 +1,125 @@ +(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))r(l);new MutationObserver(l=>{for(const o of l)if(o.type==="childList")for(const a of o.addedNodes)a.tagName==="LINK"&&a.rel==="modulepreload"&&r(a)}).observe(document,{childList:!0,subtree:!0});function n(l){const o={};return l.integrity&&(o.integrity=l.integrity),l.referrerPolicy&&(o.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?o.credentials="include":l.crossOrigin==="anonymous"?o.credentials="omit":o.credentials="same-origin",o}function r(l){if(l.ep)return;l.ep=!0;const o=n(l);fetch(l.href,o)}})();function df(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var Mu={exports:{}},Hl={},zu={exports:{}},O={};/** + * @license React + * react.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */var _r=Symbol.for("react.element"),ff=Symbol.for("react.portal"),pf=Symbol.for("react.fragment"),mf=Symbol.for("react.strict_mode"),hf=Symbol.for("react.profiler"),vf=Symbol.for("react.provider"),yf=Symbol.for("react.context"),gf=Symbol.for("react.forward_ref"),xf=Symbol.for("react.suspense"),wf=Symbol.for("react.memo"),Sf=Symbol.for("react.lazy"),ui=Symbol.iterator;function kf(e){return e===null||typeof e!="object"?null:(e=ui&&e[ui]||e["@@iterator"],typeof e=="function"?e:null)}var Ru={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},Tu=Object.assign,Lu={};function Ln(e,t,n){this.props=e,this.context=t,this.refs=Lu,this.updater=n||Ru}Ln.prototype.isReactComponent={};Ln.prototype.setState=function(e,t){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")};Ln.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function Ou(){}Ou.prototype=Ln.prototype;function Qa(e,t,n){this.props=e,this.context=t,this.refs=Lu,this.updater=n||Ru}var Ka=Qa.prototype=new Ou;Ka.constructor=Qa;Tu(Ka,Ln.prototype);Ka.isPureReactComponent=!0;var ci=Array.isArray,Fu=Object.prototype.hasOwnProperty,Ga={current:null},Iu={key:!0,ref:!0,__self:!0,__source:!0};function Du(e,t,n){var r,l={},o=null,a=null;if(t!=null)for(r in t.ref!==void 0&&(a=t.ref),t.key!==void 0&&(o=""+t.key),t)Fu.call(t,r)&&!Iu.hasOwnProperty(r)&&(l[r]=t[r]);var i=arguments.length-2;if(i===1)l.children=n;else if(1>>1,X=E[H];if(0>>1;Hl(tt,T))_el(nt,tt)?(E[H]=nt,E[_e]=T,H=_e):(E[H]=tt,E[Ke]=T,H=Ke);else if(_el(nt,T))E[H]=nt,E[_e]=T,H=_e;else break e}}return R}function l(E,R){var T=E.sortIndex-R.sortIndex;return T!==0?T:E.id-R.id}if(typeof performance=="object"&&typeof performance.now=="function"){var o=performance;e.unstable_now=function(){return o.now()}}else{var a=Date,i=a.now();e.unstable_now=function(){return a.now()-i}}var u=[],f=[],h=1,y=null,v=3,N=!1,j=!1,_=!1,$=typeof setTimeout=="function"?setTimeout:null,p=typeof clearTimeout=="function"?clearTimeout:null,d=typeof setImmediate<"u"?setImmediate:null;typeof navigator<"u"&&navigator.scheduling!==void 0&&navigator.scheduling.isInputPending!==void 0&&navigator.scheduling.isInputPending.bind(navigator.scheduling);function m(E){for(var R=n(f);R!==null;){if(R.callback===null)r(f);else if(R.startTime<=E)r(f),R.sortIndex=R.expirationTime,t(u,R);else break;R=n(f)}}function g(E){if(_=!1,m(E),!j)if(n(u)!==null)j=!0,tn(k);else{var R=n(f);R!==null&&It(g,R.startTime-E)}}function k(E,R){j=!1,_&&(_=!1,p(M),M=-1),N=!0;var T=v;try{for(m(R),y=n(u);y!==null&&(!(y.expirationTime>R)||E&&!ne());){var H=y.callback;if(typeof H=="function"){y.callback=null,v=y.priorityLevel;var X=H(y.expirationTime<=R);R=e.unstable_now(),typeof X=="function"?y.callback=X:y===n(u)&&r(u),m(R)}else r(u);y=n(u)}if(y!==null)var je=!0;else{var Ke=n(f);Ke!==null&&It(g,Ke.startTime-R),je=!1}return je}finally{y=null,v=T,N=!1}}var b=!1,C=null,M=-1,U=5,L=-1;function ne(){return!(e.unstable_now()-LE||125H?(E.sortIndex=T,t(f,E),n(u)===null&&E===n(f)&&(_?(p(M),M=-1):_=!0,It(g,T-H))):(E.sortIndex=X,t(u,E),j||N||(j=!0,tn(k))),E},e.unstable_shouldYield=ne,e.unstable_wrapCallback=function(E){var R=v;return function(){var T=v;v=R;try{return E.apply(this,arguments)}finally{v=T}}}})(Bu);Au.exports=Bu;var Lf=Au.exports;/** + * @license React + * 
react-dom.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */var Of=S,be=Lf;function w(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),qo=Object.prototype.hasOwnProperty,Ff=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,fi={},pi={};function If(e){return qo.call(pi,e)?!0:qo.call(fi,e)?!1:Ff.test(e)?pi[e]=!0:(fi[e]=!0,!1)}function Df(e,t,n,r){if(n!==null&&n.type===0)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":return r?!1:n!==null?!n.acceptsBooleans:(e=e.toLowerCase().slice(0,5),e!=="data-"&&e!=="aria-");default:return!1}}function Uf(e,t,n,r){if(t===null||typeof t>"u"||Df(e,t,n,r))return!0;if(r)return!1;if(n!==null)switch(n.type){case 3:return!t;case 4:return t===!1;case 5:return isNaN(t);case 6:return isNaN(t)||1>t}return!1}function ge(e,t,n,r,l,o,a){this.acceptsBooleans=t===2||t===3||t===4,this.attributeName=r,this.attributeNamespace=l,this.mustUseProperty=n,this.propertyName=e,this.type=t,this.sanitizeURL=o,this.removeEmptyString=a}var ue={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){ue[e]=new ge(e,0,!1,e,null,!1,!1)});[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0];ue[t]=new ge(t,1,!1,e[1],null,!1,!1)});["contentEditable","draggable","spellCheck","value"].forEach(function(e){ue[e]=new ge(e,2,!1,e.toLowerCase(),null,!1,!1)});["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){ue[e]=new ge(e,2,!1,e,null,!1,!1)});"allowFullScreen async autoFocus autoPlay controls default defer disabled disablePictureInPicture disableRemotePlayback formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){ue[e]=new ge(e,3,!1,e.toLowerCase(),null,!1,!1)});["checked","multiple","muted","selected"].forEach(function(e){ue[e]=new ge(e,3,!0,e,null,!1,!1)});["capture","download"].forEach(function(e){ue[e]=new ge(e,4,!1,e,null,!1,!1)});["cols","rows","size","span"].forEach(function(e){ue[e]=new ge(e,6,!1,e,null,!1,!1)});["rowSpan","start"].forEach(function(e){ue[e]=new ge(e,5,!1,e.toLowerCase(),null,!1,!1)});var Xa=/[\-:]([a-z])/g;function Ja(e){return e[1].toUpperCase()}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color 
stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var t=e.replace(Xa,Ja);ue[t]=new ge(t,1,!1,e,null,!1,!1)});"xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var t=e.replace(Xa,Ja);ue[t]=new ge(t,1,!1,e,"http://www.w3.org/1999/xlink",!1,!1)});["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(Xa,Ja);ue[t]=new ge(t,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1,!1)});["tabIndex","crossOrigin"].forEach(function(e){ue[e]=new ge(e,1,!1,e.toLowerCase(),null,!1,!1)});ue.xlinkHref=new ge("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1);["src","href","action","formAction"].forEach(function(e){ue[e]=new ge(e,1,!1,e.toLowerCase(),null,!0,!0)});function Za(e,t,n,r){var l=ue.hasOwnProperty(t)?ue[t]:null;(l!==null?l.type!==0:r||!(2i||l[a]!==o[i]){var u=` +`+l[a].replace(" at new "," at ");return e.displayName&&u.includes("")&&(u=u.replace("",e.displayName)),u}while(1<=a&&0<=i);break}}}finally{So=!1,Error.prepareStackTrace=n}return(e=e?e.displayName||e.name:"")?Yn(e):""}function $f(e){switch(e.tag){case 5:return Yn(e.type);case 16:return Yn("Lazy");case 13:return Yn("Suspense");case 19:return Yn("SuspenseList");case 0:case 2:case 15:return e=ko(e.type,!1),e;case 11:return e=ko(e.type.render,!1),e;case 1:return e=ko(e.type,!0),e;default:return""}}function ra(e){if(e==null)return null;if(typeof e=="function")return e.displayName||e.name||null;if(typeof e=="string")return e;switch(e){case un:return"Fragment";case sn:return"Portal";case ea:return"Profiler";case qa:return"StrictMode";case ta:return"Suspense";case na:return"SuspenseList"}if(typeof e=="object")switch(e.$$typeof){case Qu:return(e.displayName||"Context")+".Consumer";case Wu:return(e._context.displayName||"Context")+".Provider";case es:var t=e.render;return e=e.displayName,e||(e=t.displayName||t.name||"",e=e!==""?"ForwardRef("+e+")":"ForwardRef"),e;case ts:return t=e.displayName||null,t!==null?t:ra(e.type)||"Memo";case gt:t=e._payload,e=e._init;try{return ra(e(t))}catch{}}return null}function Hf(e){var t=e.type;switch(e.tag){case 24:return"Cache";case 9:return(t.displayName||"Context")+".Consumer";case 10:return(t._context.displayName||"Context")+".Provider";case 18:return"DehydratedFragment";case 11:return e=t.render,e=e.displayName||e.name||"",t.displayName||(e!==""?"ForwardRef("+e+")":"ForwardRef");case 7:return"Fragment";case 5:return t;case 4:return"Portal";case 3:return"Root";case 6:return"Text";case 16:return ra(t);case 8:return t===qa?"StrictMode":"Mode";case 22:return"Offscreen";case 12:return"Profiler";case 21:return"Scope";case 13:return"Suspense";case 19:return"SuspenseList";case 25:return"TracingMarker";case 1:case 0:case 17:case 2:case 14:case 15:if(typeof t=="function")return t.displayName||t.name||null;if(typeof t=="string")return t}return null}function Rt(e){switch(typeof e){case"boolean":case"number":case"string":case"undefined":return e;case"object":return e;default:return""}}function Gu(e){var t=e.type;return(e=e.nodeName)&&e.toLowerCase()==="input"&&(t==="checkbox"||t==="radio")}function Af(e){var 
t=Gu(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&typeof n<"u"&&typeof n.get=="function"&&typeof n.set=="function"){var l=n.get,o=n.set;return Object.defineProperty(e,t,{configurable:!0,get:function(){return l.call(this)},set:function(a){r=""+a,o.call(this,a)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return r},setValue:function(a){r=""+a},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}function Hr(e){e._valueTracker||(e._valueTracker=Af(e))}function Yu(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=Gu(e)?e.checked?"true":"false":e.value),e=r,e!==n?(t.setValue(e),!0):!1}function vl(e){if(e=e||(typeof document<"u"?document:void 0),typeof e>"u")return null;try{return e.activeElement||e.body}catch{return e.body}}function la(e,t){var n=t.checked;return G({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:n??e._wrapperState.initialChecked})}function hi(e,t){var n=t.defaultValue==null?"":t.defaultValue,r=t.checked!=null?t.checked:t.defaultChecked;n=Rt(t.value!=null?t.value:n),e._wrapperState={initialChecked:r,initialValue:n,controlled:t.type==="checkbox"||t.type==="radio"?t.checked!=null:t.value!=null}}function Xu(e,t){t=t.checked,t!=null&&Za(e,"checked",t,!1)}function oa(e,t){Xu(e,t);var n=Rt(t.value),r=t.type;if(n!=null)r==="number"?(n===0&&e.value===""||e.value!=n)&&(e.value=""+n):e.value!==""+n&&(e.value=""+n);else if(r==="submit"||r==="reset"){e.removeAttribute("value");return}t.hasOwnProperty("value")?aa(e,t.type,n):t.hasOwnProperty("defaultValue")&&aa(e,t.type,Rt(t.defaultValue)),t.checked==null&&t.defaultChecked!=null&&(e.defaultChecked=!!t.defaultChecked)}function vi(e,t,n){if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var r=t.type;if(!(r!=="submit"&&r!=="reset"||t.value!==void 0&&t.value!==null))return;t=""+e._wrapperState.initialValue,n||t===e.value||(e.value=t),e.defaultValue=t}n=e.name,n!==""&&(e.name=""),e.defaultChecked=!!e._wrapperState.initialChecked,n!==""&&(e.name=n)}function aa(e,t,n){(t!=="number"||vl(e.ownerDocument)!==e)&&(n==null?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+n&&(e.defaultValue=""+n))}var Xn=Array.isArray;function wn(e,t,n,r){if(e=e.options,t){t={};for(var l=0;l"+t.valueOf().toString()+"",t=Ar.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}});function ur(e,t){if(t){var n=e.firstChild;if(n&&n===e.lastChild&&n.nodeType===3){n.nodeValue=t;return}}e.textContent=t}var qn={animationIterationCount:!0,aspectRatio:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},Bf=["Webkit","ms","Moz","O"];Object.keys(qn).forEach(function(e){Bf.forEach(function(t){t=t+e.charAt(0).toUpperCase()+e.substring(1),qn[t]=qn[e]})});function ec(e,t,n){return t==null||typeof t=="boolean"||t===""?"":n||typeof 
t!="number"||t===0||qn.hasOwnProperty(e)&&qn[e]?(""+t).trim():t+"px"}function tc(e,t){e=e.style;for(var n in t)if(t.hasOwnProperty(n)){var r=n.indexOf("--")===0,l=ec(n,t[n],r);n==="float"&&(n="cssFloat"),r?e.setProperty(n,l):e[n]=l}}var Vf=G({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function ua(e,t){if(t){if(Vf[e]&&(t.children!=null||t.dangerouslySetInnerHTML!=null))throw Error(w(137,e));if(t.dangerouslySetInnerHTML!=null){if(t.children!=null)throw Error(w(60));if(typeof t.dangerouslySetInnerHTML!="object"||!("__html"in t.dangerouslySetInnerHTML))throw Error(w(61))}if(t.style!=null&&typeof t.style!="object")throw Error(w(62))}}function ca(e,t){if(e.indexOf("-")===-1)return typeof t.is=="string";switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}var da=null;function ns(e){return e=e.target||e.srcElement||window,e.correspondingUseElement&&(e=e.correspondingUseElement),e.nodeType===3?e.parentNode:e}var fa=null,Sn=null,kn=null;function xi(e){if(e=Pr(e)){if(typeof fa!="function")throw Error(w(280));var t=e.stateNode;t&&(t=Ql(t),fa(e.stateNode,e.type,t))}}function nc(e){Sn?kn?kn.push(e):kn=[e]:Sn=e}function rc(){if(Sn){var e=Sn,t=kn;if(kn=Sn=null,xi(e),t)for(e=0;e>>=0,e===0?32:31-(tp(e)/np|0)|0}var Br=64,Vr=4194304;function Jn(e){switch(e&-e){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return e&4194240;case 4194304:case 8388608:case 16777216:case 33554432:case 67108864:return e&130023424;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 1073741824;default:return e}}function wl(e,t){var n=e.pendingLanes;if(n===0)return 0;var r=0,l=e.suspendedLanes,o=e.pingedLanes,a=n&268435455;if(a!==0){var i=a&~l;i!==0?r=Jn(i):(o&=a,o!==0&&(r=Jn(o)))}else a=n&~l,a!==0?r=Jn(a):o!==0&&(r=Jn(o));if(r===0)return 0;if(t!==0&&t!==r&&!(t&l)&&(l=r&-r,o=t&-t,l>=o||l===16&&(o&4194240)!==0))return t;if(r&4&&(r|=n&16),t=e.entangledLanes,t!==0)for(e=e.entanglements,t&=r;0n;n++)t.push(e);return t}function Cr(e,t,n){e.pendingLanes|=t,t!==536870912&&(e.suspendedLanes=0,e.pingedLanes=0),e=e.eventTimes,t=31-Ve(t),e[t]=n}function ap(e,t){var n=e.pendingLanes&~t;e.pendingLanes=t,e.suspendedLanes=0,e.pingedLanes=0,e.expiredLanes&=t,e.mutableReadLanes&=t,e.entangledLanes&=t,t=e.entanglements;var r=e.eventTimes;for(e=e.expirationTimes;0=tr),Pi=" ",bi=!1;function Nc(e,t){switch(e){case"keyup":return Lp.indexOf(t.keyCode)!==-1;case"keydown":return t.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function jc(e){return e=e.detail,typeof e=="object"&&"data"in e?e.data:null}var cn=!1;function Fp(e,t){switch(e){case"compositionend":return jc(t);case"keypress":return t.which!==32?null:(bi=!0,Pi);case"textInput":return e=t.data,e===Pi&&bi?null:e;default:return null}}function Ip(e,t){if(cn)return e==="compositionend"||!cs&&Nc(e,t)?(e=Sc(),al=ss=kt=null,cn=!1,e):null;switch(e){case"paste":return null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1=t)return{node:n,offset:t-e};e=r}e:{for(;n;){if(n.nextSibling){n=n.nextSibling;break 
e}n=n.parentNode}n=void 0}n=Ti(n)}}function Pc(e,t){return e&&t?e===t?!0:e&&e.nodeType===3?!1:t&&t.nodeType===3?Pc(e,t.parentNode):"contains"in e?e.contains(t):e.compareDocumentPosition?!!(e.compareDocumentPosition(t)&16):!1:!1}function bc(){for(var e=window,t=vl();t instanceof e.HTMLIFrameElement;){try{var n=typeof t.contentWindow.location.href=="string"}catch{n=!1}if(n)e=t.contentWindow;else break;t=vl(e.document)}return t}function ds(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return t&&(t==="input"&&(e.type==="text"||e.type==="search"||e.type==="tel"||e.type==="url"||e.type==="password")||t==="textarea"||e.contentEditable==="true")}function Qp(e){var t=bc(),n=e.focusedElem,r=e.selectionRange;if(t!==n&&n&&n.ownerDocument&&Pc(n.ownerDocument.documentElement,n)){if(r!==null&&ds(n)){if(t=r.start,e=r.end,e===void 0&&(e=t),"selectionStart"in n)n.selectionStart=t,n.selectionEnd=Math.min(e,n.value.length);else if(e=(t=n.ownerDocument||document)&&t.defaultView||window,e.getSelection){e=e.getSelection();var l=n.textContent.length,o=Math.min(r.start,l);r=r.end===void 0?o:Math.min(r.end,l),!e.extend&&o>r&&(l=r,r=o,o=l),l=Li(n,o);var a=Li(n,r);l&&a&&(e.rangeCount!==1||e.anchorNode!==l.node||e.anchorOffset!==l.offset||e.focusNode!==a.node||e.focusOffset!==a.offset)&&(t=t.createRange(),t.setStart(l.node,l.offset),e.removeAllRanges(),o>r?(e.addRange(t),e.extend(a.node,a.offset)):(t.setEnd(a.node,a.offset),e.addRange(t)))}}for(t=[],e=n;e=e.parentNode;)e.nodeType===1&&t.push({element:e,left:e.scrollLeft,top:e.scrollTop});for(typeof n.focus=="function"&&n.focus(),n=0;n=document.documentMode,dn=null,ga=null,rr=null,xa=!1;function Oi(e,t,n){var r=n.window===n?n.document:n.nodeType===9?n:n.ownerDocument;xa||dn==null||dn!==vl(r)||(r=dn,"selectionStart"in r&&ds(r)?r={start:r.selectionStart,end:r.selectionEnd}:(r=(r.ownerDocument&&r.ownerDocument.defaultView||window).getSelection(),r={anchorNode:r.anchorNode,anchorOffset:r.anchorOffset,focusNode:r.focusNode,focusOffset:r.focusOffset}),rr&&hr(rr,r)||(rr=r,r=Nl(ga,"onSelect"),0mn||(e.current=_a[mn],_a[mn]=null,mn--)}function A(e,t){mn++,_a[mn]=e.current,e.current=t}var Tt={},pe=Ot(Tt),Se=Ot(!1),Kt=Tt;function Pn(e,t){var n=e.type.contextTypes;if(!n)return Tt;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var l={},o;for(o in n)l[o]=t[o];return r&&(e=e.stateNode,e.__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=l),l}function ke(e){return e=e.childContextTypes,e!=null}function _l(){V(Se),V(pe)}function Ai(e,t,n){if(pe.current!==Tt)throw Error(w(168));A(pe,t),A(Se,n)}function Dc(e,t,n){var r=e.stateNode;if(t=t.childContextTypes,typeof r.getChildContext!="function")return n;r=r.getChildContext();for(var l in r)if(!(l in t))throw Error(w(108,Hf(e)||"Unknown",l));return G({},n,r)}function Cl(e){return e=(e=e.stateNode)&&e.__reactInternalMemoizedMergedChildContext||Tt,Kt=pe.current,A(pe,e),A(Se,Se.current),!0}function Bi(e,t,n){var r=e.stateNode;if(!r)throw Error(w(169));n?(e=Dc(e,t,Kt),r.__reactInternalMemoizedMergedChildContext=e,V(Se),V(pe),A(pe,e)):V(Se),A(Se,n)}var at=null,Kl=!1,Fo=!1;function Uc(e){at===null?at=[e]:at.push(e)}function lm(e){Kl=!0,Uc(e)}function Ft(){if(!Fo&&at!==null){Fo=!0;var e=0,t=D;try{var n=at;for(D=1;e>=a,l-=a,st=1<<32-Ve(t)+l|n<M?(U=C,C=null):U=C.sibling;var L=v(p,C,m[M],g);if(L===null){C===null&&(C=U);break}e&&C&&L.alternate===null&&t(p,C),d=o(L,d,M),b===null?k=L:b.sibling=L,b=L,C=U}if(M===m.length)return 
n(p,C),W&&$t(p,M),k;if(C===null){for(;MM?(U=C,C=null):U=C.sibling;var ne=v(p,C,L.value,g);if(ne===null){C===null&&(C=U);break}e&&C&&ne.alternate===null&&t(p,C),d=o(ne,d,M),b===null?k=ne:b.sibling=ne,b=ne,C=U}if(L.done)return n(p,C),W&&$t(p,M),k;if(C===null){for(;!L.done;M++,L=m.next())L=y(p,L.value,g),L!==null&&(d=o(L,d,M),b===null?k=L:b.sibling=L,b=L);return W&&$t(p,M),k}for(C=r(p,C);!L.done;M++,L=m.next())L=N(C,p,M,L.value,g),L!==null&&(e&&L.alternate!==null&&C.delete(L.key===null?M:L.key),d=o(L,d,M),b===null?k=L:b.sibling=L,b=L);return e&&C.forEach(function(et){return t(p,et)}),W&&$t(p,M),k}function $(p,d,m,g){if(typeof m=="object"&&m!==null&&m.type===un&&m.key===null&&(m=m.props.children),typeof m=="object"&&m!==null){switch(m.$$typeof){case $r:e:{for(var k=m.key,b=d;b!==null;){if(b.key===k){if(k=m.type,k===un){if(b.tag===7){n(p,b.sibling),d=l(b,m.props.children),d.return=p,p=d;break e}}else if(b.elementType===k||typeof k=="object"&&k!==null&&k.$$typeof===gt&&Qi(k)===b.type){n(p,b.sibling),d=l(b,m.props),d.ref=Wn(p,b,m),d.return=p,p=d;break e}n(p,b);break}else t(p,b);b=b.sibling}m.type===un?(d=Qt(m.props.children,p.mode,g,m.key),d.return=p,p=d):(g=ml(m.type,m.key,m.props,null,p.mode,g),g.ref=Wn(p,d,m),g.return=p,p=g)}return a(p);case sn:e:{for(b=m.key;d!==null;){if(d.key===b)if(d.tag===4&&d.stateNode.containerInfo===m.containerInfo&&d.stateNode.implementation===m.implementation){n(p,d.sibling),d=l(d,m.children||[]),d.return=p,p=d;break e}else{n(p,d);break}else t(p,d);d=d.sibling}d=Vo(m,p.mode,g),d.return=p,p=d}return a(p);case gt:return b=m._init,$(p,d,b(m._payload),g)}if(Xn(m))return j(p,d,m,g);if($n(m))return _(p,d,m,g);Jr(p,m)}return typeof m=="string"&&m!==""||typeof m=="number"?(m=""+m,d!==null&&d.tag===6?(n(p,d.sibling),d=l(d,m),d.return=p,p=d):(n(p,d),d=Bo(m,p.mode,g),d.return=p,p=d),a(p)):n(p,d)}return $}var Mn=Bc(!0),Vc=Bc(!1),bl=Ot(null),Ml=null,yn=null,hs=null;function vs(){hs=yn=Ml=null}function ys(e){var t=bl.current;V(bl),e._currentValue=t}function Pa(e,t,n){for(;e!==null;){var r=e.alternate;if((e.childLanes&t)!==t?(e.childLanes|=t,r!==null&&(r.childLanes|=t)):r!==null&&(r.childLanes&t)!==t&&(r.childLanes|=t),e===n)break;e=e.return}}function jn(e,t){Ml=e,hs=yn=null,e=e.dependencies,e!==null&&e.firstContext!==null&&(e.lanes&t&&(we=!0),e.firstContext=null)}function De(e){var t=e._currentValue;if(hs!==e)if(e={context:e,memoizedValue:t,next:null},yn===null){if(Ml===null)throw Error(w(308));yn=e,Ml.dependencies={lanes:0,firstContext:e}}else yn=yn.next=e;return t}var Bt=null;function gs(e){Bt===null?Bt=[e]:Bt.push(e)}function Wc(e,t,n,r){var l=t.interleaved;return l===null?(n.next=n,gs(t)):(n.next=l.next,l.next=n),t.interleaved=n,ft(e,r)}function ft(e,t){e.lanes|=t;var n=e.alternate;for(n!==null&&(n.lanes|=t),n=e,e=e.return;e!==null;)e.childLanes|=t,n=e.alternate,n!==null&&(n.childLanes|=t),n=e,e=e.return;return n.tag===3?n.stateNode:null}var xt=!1;function xs(e){e.updateQueue={baseState:e.memoizedState,firstBaseUpdate:null,lastBaseUpdate:null,shared:{pending:null,interleaved:null,lanes:0},effects:null}}function Qc(e,t){e=e.updateQueue,t.updateQueue===e&&(t.updateQueue={baseState:e.baseState,firstBaseUpdate:e.firstBaseUpdate,lastBaseUpdate:e.lastBaseUpdate,shared:e.shared,effects:e.effects})}function ut(e,t){return{eventTime:e,lane:t,tag:0,payload:null,callback:null,next:null}}function Pt(e,t,n){var r=e.updateQueue;if(r===null)return null;if(r=r.shared,F&2){var l=r.pending;return l===null?t.next=t:(t.next=l.next,l.next=t),r.pending=t,ft(e,n)}return 
l=r.interleaved,l===null?(t.next=t,gs(r)):(t.next=l.next,l.next=t),r.interleaved=t,ft(e,n)}function il(e,t,n){if(t=t.updateQueue,t!==null&&(t=t.shared,(n&4194240)!==0)){var r=t.lanes;r&=e.pendingLanes,n|=r,t.lanes=n,ls(e,n)}}function Ki(e,t){var n=e.updateQueue,r=e.alternate;if(r!==null&&(r=r.updateQueue,n===r)){var l=null,o=null;if(n=n.firstBaseUpdate,n!==null){do{var a={eventTime:n.eventTime,lane:n.lane,tag:n.tag,payload:n.payload,callback:n.callback,next:null};o===null?l=o=a:o=o.next=a,n=n.next}while(n!==null);o===null?l=o=t:o=o.next=t}else l=o=t;n={baseState:r.baseState,firstBaseUpdate:l,lastBaseUpdate:o,shared:r.shared,effects:r.effects},e.updateQueue=n;return}e=n.lastBaseUpdate,e===null?n.firstBaseUpdate=t:e.next=t,n.lastBaseUpdate=t}function zl(e,t,n,r){var l=e.updateQueue;xt=!1;var o=l.firstBaseUpdate,a=l.lastBaseUpdate,i=l.shared.pending;if(i!==null){l.shared.pending=null;var u=i,f=u.next;u.next=null,a===null?o=f:a.next=f,a=u;var h=e.alternate;h!==null&&(h=h.updateQueue,i=h.lastBaseUpdate,i!==a&&(i===null?h.firstBaseUpdate=f:i.next=f,h.lastBaseUpdate=u))}if(o!==null){var y=l.baseState;a=0,h=f=u=null,i=o;do{var v=i.lane,N=i.eventTime;if((r&v)===v){h!==null&&(h=h.next={eventTime:N,lane:0,tag:i.tag,payload:i.payload,callback:i.callback,next:null});e:{var j=e,_=i;switch(v=t,N=n,_.tag){case 1:if(j=_.payload,typeof j=="function"){y=j.call(N,y,v);break e}y=j;break e;case 3:j.flags=j.flags&-65537|128;case 0:if(j=_.payload,v=typeof j=="function"?j.call(N,y,v):j,v==null)break e;y=G({},y,v);break e;case 2:xt=!0}}i.callback!==null&&i.lane!==0&&(e.flags|=64,v=l.effects,v===null?l.effects=[i]:v.push(i))}else N={eventTime:N,lane:v,tag:i.tag,payload:i.payload,callback:i.callback,next:null},h===null?(f=h=N,u=y):h=h.next=N,a|=v;if(i=i.next,i===null){if(i=l.shared.pending,i===null)break;v=i,i=v.next,v.next=null,l.lastBaseUpdate=v,l.shared.pending=null}}while(!0);if(h===null&&(u=y),l.baseState=u,l.firstBaseUpdate=f,l.lastBaseUpdate=h,t=l.shared.interleaved,t!==null){l=t;do a|=l.lane,l=l.next;while(l!==t)}else o===null&&(l.shared.lanes=0);Xt|=a,e.lanes=a,e.memoizedState=y}}function Gi(e,t,n){if(e=t.effects,t.effects=null,e!==null)for(t=0;tn?n:4,e(!0);var r=Do.transition;Do.transition={};try{e(!1),t()}finally{D=n,Do.transition=r}}function ud(){return Ue().memoizedState}function im(e,t,n){var r=Mt(e);if(n={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null},cd(e))dd(t,n);else if(n=Wc(e,t,n,r),n!==null){var l=ve();We(n,e,r,l),fd(n,t,r)}}function um(e,t,n){var r=Mt(e),l={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null};if(cd(e))dd(t,l);else{var o=e.alternate;if(e.lanes===0&&(o===null||o.lanes===0)&&(o=t.lastRenderedReducer,o!==null))try{var a=t.lastRenderedState,i=o(a,n);if(l.hasEagerState=!0,l.eagerState=i,Qe(i,a)){var u=t.interleaved;u===null?(l.next=l,gs(t)):(l.next=u.next,u.next=l),t.interleaved=l;return}}catch{}finally{}n=Wc(e,t,l,r),n!==null&&(l=ve(),We(n,e,r,l),fd(n,t,r))}}function cd(e){var t=e.alternate;return e===K||t!==null&&t===K}function dd(e,t){lr=Tl=!0;var n=e.pending;n===null?t.next=t:(t.next=n.next,n.next=t),e.pending=t}function fd(e,t,n){if(n&4194240){var r=t.lanes;r&=e.pendingLanes,n|=r,t.lanes=n,ls(e,n)}}var Ll={readContext:De,useCallback:ce,useContext:ce,useEffect:ce,useImperativeHandle:ce,useInsertionEffect:ce,useLayoutEffect:ce,useMemo:ce,useReducer:ce,useRef:ce,useState:ce,useDebugValue:ce,useDeferredValue:ce,useTransition:ce,useMutableSource:ce,useSyncExternalStore:ce,useId:ce,unstable_isNewReconciler:!1},cm={readContext:De,useCallback:function(e,t){return 
Xe().memoizedState=[e,t===void 0?null:t],e},useContext:De,useEffect:Xi,useImperativeHandle:function(e,t,n){return n=n!=null?n.concat([e]):null,cl(4194308,4,ld.bind(null,t,e),n)},useLayoutEffect:function(e,t){return cl(4194308,4,e,t)},useInsertionEffect:function(e,t){return cl(4,2,e,t)},useMemo:function(e,t){var n=Xe();return t=t===void 0?null:t,e=e(),n.memoizedState=[e,t],e},useReducer:function(e,t,n){var r=Xe();return t=n!==void 0?n(t):t,r.memoizedState=r.baseState=t,e={pending:null,interleaved:null,lanes:0,dispatch:null,lastRenderedReducer:e,lastRenderedState:t},r.queue=e,e=e.dispatch=im.bind(null,K,e),[r.memoizedState,e]},useRef:function(e){var t=Xe();return e={current:e},t.memoizedState=e},useState:Yi,useDebugValue:Es,useDeferredValue:function(e){return Xe().memoizedState=e},useTransition:function(){var e=Yi(!1),t=e[0];return e=sm.bind(null,e[1]),Xe().memoizedState=e,[t,e]},useMutableSource:function(){},useSyncExternalStore:function(e,t,n){var r=K,l=Xe();if(W){if(n===void 0)throw Error(w(407));n=n()}else{if(n=t(),ae===null)throw Error(w(349));Yt&30||Xc(r,t,n)}l.memoizedState=n;var o={value:n,getSnapshot:t};return l.queue=o,Xi(Zc.bind(null,r,o,e),[e]),r.flags|=2048,Nr(9,Jc.bind(null,r,o,n,t),void 0,null),n},useId:function(){var e=Xe(),t=ae.identifierPrefix;if(W){var n=it,r=st;n=(r&~(1<<32-Ve(r)-1)).toString(32)+n,t=":"+t+"R"+n,n=Sr++,0<\/script>",e=e.removeChild(e.firstChild)):typeof r.is=="string"?e=a.createElement(n,{is:r.is}):(e=a.createElement(n),n==="select"&&(a=e,r.multiple?a.multiple=!0:r.size&&(a.size=r.size))):e=a.createElementNS(e,n),e[Je]=t,e[gr]=r,kd(e,t,!1,!1),t.stateNode=e;e:{switch(a=ca(n,r),n){case"dialog":B("cancel",e),B("close",e),l=r;break;case"iframe":case"object":case"embed":B("load",e),l=r;break;case"video":case"audio":for(l=0;lTn&&(t.flags|=128,r=!0,Qn(o,!1),t.lanes=4194304)}else{if(!r)if(e=Rl(a),e!==null){if(t.flags|=128,r=!0,n=e.updateQueue,n!==null&&(t.updateQueue=n,t.flags|=4),Qn(o,!0),o.tail===null&&o.tailMode==="hidden"&&!a.alternate&&!W)return de(t),null}else 2*J()-o.renderingStartTime>Tn&&n!==1073741824&&(t.flags|=128,r=!0,Qn(o,!1),t.lanes=4194304);o.isBackwards?(a.sibling=t.child,t.child=a):(n=o.last,n!==null?n.sibling=a:t.child=a,o.last=a)}return o.tail!==null?(t=o.tail,o.rendering=t,o.tail=t.sibling,o.renderingStartTime=J(),t.sibling=null,n=Q.current,A(Q,r?n&1|2:n&1),t):(de(t),null);case 22:case 23:return Ts(),r=t.memoizedState!==null,e!==null&&e.memoizedState!==null!==r&&(t.flags|=8192),r&&t.mode&1?Ce&1073741824&&(de(t),t.subtreeFlags&6&&(t.flags|=8192)):de(t),null;case 24:return null;case 25:return null}throw Error(w(156,t.tag))}function gm(e,t){switch(ps(t),t.tag){case 1:return ke(t.type)&&_l(),e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 3:return zn(),V(Se),V(pe),ks(),e=t.flags,e&65536&&!(e&128)?(t.flags=e&-65537|128,t):null;case 5:return Ss(t),null;case 13:if(V(Q),e=t.memoizedState,e!==null&&e.dehydrated!==null){if(t.alternate===null)throw Error(w(340));bn()}return e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 19:return V(Q),null;case 4:return zn(),null;case 10:return ys(t.type._context),null;case 22:case 23:return Ts(),null;case 24:return null;default:return null}}var qr=!1,fe=!1,xm=typeof WeakSet=="function"?WeakSet:Set,P=null;function gn(e,t){var n=e.ref;if(n!==null)if(typeof n=="function")try{n(null)}catch(r){Y(e,t,r)}else n.current=null}function Ia(e,t,n){try{n()}catch(r){Y(e,t,r)}}var su=!1;function wm(e,t){if(wa=Sl,e=bc(),ds(e)){if("selectionStart"in e)var n={start:e.selectionStart,end:e.selectionEnd};else 
e:{n=(n=e.ownerDocument)&&n.defaultView||window;var r=n.getSelection&&n.getSelection();if(r&&r.rangeCount!==0){n=r.anchorNode;var l=r.anchorOffset,o=r.focusNode;r=r.focusOffset;try{n.nodeType,o.nodeType}catch{n=null;break e}var a=0,i=-1,u=-1,f=0,h=0,y=e,v=null;t:for(;;){for(var N;y!==n||l!==0&&y.nodeType!==3||(i=a+l),y!==o||r!==0&&y.nodeType!==3||(u=a+r),y.nodeType===3&&(a+=y.nodeValue.length),(N=y.firstChild)!==null;)v=y,y=N;for(;;){if(y===e)break t;if(v===n&&++f===l&&(i=a),v===o&&++h===r&&(u=a),(N=y.nextSibling)!==null)break;y=v,v=y.parentNode}y=N}n=i===-1||u===-1?null:{start:i,end:u}}else n=null}n=n||{start:0,end:0}}else n=null;for(Sa={focusedElem:e,selectionRange:n},Sl=!1,P=t;P!==null;)if(t=P,e=t.child,(t.subtreeFlags&1028)!==0&&e!==null)e.return=t,P=e;else for(;P!==null;){t=P;try{var j=t.alternate;if(t.flags&1024)switch(t.tag){case 0:case 11:case 15:break;case 1:if(j!==null){var _=j.memoizedProps,$=j.memoizedState,p=t.stateNode,d=p.getSnapshotBeforeUpdate(t.elementType===t.type?_:He(t.type,_),$);p.__reactInternalSnapshotBeforeUpdate=d}break;case 3:var m=t.stateNode.containerInfo;m.nodeType===1?m.textContent="":m.nodeType===9&&m.documentElement&&m.removeChild(m.documentElement);break;case 5:case 6:case 4:case 17:break;default:throw Error(w(163))}}catch(g){Y(t,t.return,g)}if(e=t.sibling,e!==null){e.return=t.return,P=e;break}P=t.return}return j=su,su=!1,j}function or(e,t,n){var r=t.updateQueue;if(r=r!==null?r.lastEffect:null,r!==null){var l=r=r.next;do{if((l.tag&e)===e){var o=l.destroy;l.destroy=void 0,o!==void 0&&Ia(t,n,o)}l=l.next}while(l!==r)}}function Xl(e,t){if(t=t.updateQueue,t=t!==null?t.lastEffect:null,t!==null){var n=t=t.next;do{if((n.tag&e)===e){var r=n.create;n.destroy=r()}n=n.next}while(n!==t)}}function Da(e){var t=e.ref;if(t!==null){var n=e.stateNode;switch(e.tag){case 5:e=n;break;default:e=n}typeof t=="function"?t(e):t.current=e}}function _d(e){var t=e.alternate;t!==null&&(e.alternate=null,_d(t)),e.child=null,e.deletions=null,e.sibling=null,e.tag===5&&(t=e.stateNode,t!==null&&(delete t[Je],delete t[gr],delete t[ja],delete t[nm],delete t[rm])),e.stateNode=null,e.return=null,e.dependencies=null,e.memoizedProps=null,e.memoizedState=null,e.pendingProps=null,e.stateNode=null,e.updateQueue=null}function Cd(e){return e.tag===5||e.tag===3||e.tag===4}function iu(e){e:for(;;){for(;e.sibling===null;){if(e.return===null||Cd(e.return))return null;e=e.return}for(e.sibling.return=e.return,e=e.sibling;e.tag!==5&&e.tag!==6&&e.tag!==18;){if(e.flags&2||e.child===null||e.tag===4)continue e;e.child.return=e,e=e.child}if(!(e.flags&2))return e.stateNode}}function Ua(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.nodeType===8?n.parentNode.insertBefore(e,t):n.insertBefore(e,t):(n.nodeType===8?(t=n.parentNode,t.insertBefore(e,n)):(t=n,t.appendChild(e)),n=n._reactRootContainer,n!=null||t.onclick!==null||(t.onclick=jl));else if(r!==4&&(e=e.child,e!==null))for(Ua(e,t,n),e=e.sibling;e!==null;)Ua(e,t,n),e=e.sibling}function $a(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.insertBefore(e,t):n.appendChild(e);else if(r!==4&&(e=e.child,e!==null))for($a(e,t,n),e=e.sibling;e!==null;)$a(e,t,n),e=e.sibling}var se=null,Ae=!1;function yt(e,t,n){for(n=n.child;n!==null;)Ed(e,t,n),n=n.sibling}function Ed(e,t,n){if(Ze&&typeof Ze.onCommitFiberUnmount=="function")try{Ze.onCommitFiberUnmount(Al,n)}catch{}switch(n.tag){case 5:fe||gn(n,t);case 6:var 
r=se,l=Ae;se=null,yt(e,t,n),se=r,Ae=l,se!==null&&(Ae?(e=se,n=n.stateNode,e.nodeType===8?e.parentNode.removeChild(n):e.removeChild(n)):se.removeChild(n.stateNode));break;case 18:se!==null&&(Ae?(e=se,n=n.stateNode,e.nodeType===8?Oo(e.parentNode,n):e.nodeType===1&&Oo(e,n),pr(e)):Oo(se,n.stateNode));break;case 4:r=se,l=Ae,se=n.stateNode.containerInfo,Ae=!0,yt(e,t,n),se=r,Ae=l;break;case 0:case 11:case 14:case 15:if(!fe&&(r=n.updateQueue,r!==null&&(r=r.lastEffect,r!==null))){l=r=r.next;do{var o=l,a=o.destroy;o=o.tag,a!==void 0&&(o&2||o&4)&&Ia(n,t,a),l=l.next}while(l!==r)}yt(e,t,n);break;case 1:if(!fe&&(gn(n,t),r=n.stateNode,typeof r.componentWillUnmount=="function"))try{r.props=n.memoizedProps,r.state=n.memoizedState,r.componentWillUnmount()}catch(i){Y(n,t,i)}yt(e,t,n);break;case 21:yt(e,t,n);break;case 22:n.mode&1?(fe=(r=fe)||n.memoizedState!==null,yt(e,t,n),fe=r):yt(e,t,n);break;default:yt(e,t,n)}}function uu(e){var t=e.updateQueue;if(t!==null){e.updateQueue=null;var n=e.stateNode;n===null&&(n=e.stateNode=new xm),t.forEach(function(r){var l=bm.bind(null,e,r);n.has(r)||(n.add(r),r.then(l,l))})}}function $e(e,t){var n=t.deletions;if(n!==null)for(var r=0;rl&&(l=a),r&=~o}if(r=l,r=J()-r,r=(120>r?120:480>r?480:1080>r?1080:1920>r?1920:3e3>r?3e3:4320>r?4320:1960*km(r/1960))-r,10e?16:e,Nt===null)var r=!1;else{if(e=Nt,Nt=null,Il=0,F&6)throw Error(w(331));var l=F;for(F|=4,P=e.current;P!==null;){var o=P,a=o.child;if(P.flags&16){var i=o.deletions;if(i!==null){for(var u=0;uJ()-zs?Wt(e,0):Ms|=n),Ne(e,t)}function Od(e,t){t===0&&(e.mode&1?(t=Vr,Vr<<=1,!(Vr&130023424)&&(Vr=4194304)):t=1);var n=ve();e=ft(e,t),e!==null&&(Cr(e,t,n),Ne(e,n))}function Pm(e){var t=e.memoizedState,n=0;t!==null&&(n=t.retryLane),Od(e,n)}function bm(e,t){var n=0;switch(e.tag){case 13:var r=e.stateNode,l=e.memoizedState;l!==null&&(n=l.retryLane);break;case 19:r=e.stateNode;break;default:throw Error(w(314))}r!==null&&r.delete(t),Od(e,n)}var Fd;Fd=function(e,t,n){if(e!==null)if(e.memoizedProps!==t.pendingProps||Se.current)we=!0;else{if(!(e.lanes&n)&&!(t.flags&128))return we=!1,vm(e,t,n);we=!!(e.flags&131072)}else we=!1,W&&t.flags&1048576&&$c(t,Pl,t.index);switch(t.lanes=0,t.tag){case 2:var r=t.type;dl(e,t),e=t.pendingProps;var l=Pn(t,pe.current);jn(t,n),l=js(null,t,r,e,l,n);var o=_s();return t.flags|=1,typeof l=="object"&&l!==null&&typeof l.render=="function"&&l.$$typeof===void 0?(t.tag=1,t.memoizedState=null,t.updateQueue=null,ke(r)?(o=!0,Cl(t)):o=!1,t.memoizedState=l.state!==null&&l.state!==void 0?l.state:null,xs(t),l.updater=Yl,t.stateNode=l,l._reactInternals=t,Ma(t,r,e,n),t=Ta(null,t,r,!0,o,n)):(t.tag=0,W&&o&&fs(t),he(null,t,l,n),t=t.child),t;case 16:r=t.elementType;e:{switch(dl(e,t),e=t.pendingProps,l=r._init,r=l(r._payload),t.type=r,l=t.tag=zm(r),e=He(r,e),l){case 0:t=Ra(null,t,r,e,n);break e;case 1:t=lu(null,t,r,e,n);break e;case 11:t=nu(null,t,r,e,n);break e;case 14:t=ru(null,t,r,He(r.type,e),n);break e}throw Error(w(306,r,""))}return t;case 0:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:He(r,l),Ra(e,t,r,l,n);case 1:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:He(r,l),lu(e,t,r,l,n);case 3:e:{if(xd(t),e===null)throw Error(w(387));r=t.pendingProps,o=t.memoizedState,l=o.element,Qc(e,t),zl(t,r,null,n);var a=t.memoizedState;if(r=a.element,o.isDehydrated)if(o={element:r,isDehydrated:!1,cache:a.cache,pendingSuspenseBoundaries:a.pendingSuspenseBoundaries,transitions:a.transitions},t.updateQueue.baseState=o,t.memoizedState=o,t.flags&256){l=Rn(Error(w(423)),t),t=ou(e,t,r,n,l);break e}else 
if(r!==l){l=Rn(Error(w(424)),t),t=ou(e,t,r,n,l);break e}else for(Ee=Et(t.stateNode.containerInfo.firstChild),Pe=t,W=!0,Be=null,n=Vc(t,null,r,n),t.child=n;n;)n.flags=n.flags&-3|4096,n=n.sibling;else{if(bn(),r===l){t=pt(e,t,n);break e}he(e,t,r,n)}t=t.child}return t;case 5:return Kc(t),e===null&&Ea(t),r=t.type,l=t.pendingProps,o=e!==null?e.memoizedProps:null,a=l.children,ka(r,l)?a=null:o!==null&&ka(r,o)&&(t.flags|=32),gd(e,t),he(e,t,a,n),t.child;case 6:return e===null&&Ea(t),null;case 13:return wd(e,t,n);case 4:return ws(t,t.stateNode.containerInfo),r=t.pendingProps,e===null?t.child=Mn(t,null,r,n):he(e,t,r,n),t.child;case 11:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:He(r,l),nu(e,t,r,l,n);case 7:return he(e,t,t.pendingProps,n),t.child;case 8:return he(e,t,t.pendingProps.children,n),t.child;case 12:return he(e,t,t.pendingProps.children,n),t.child;case 10:e:{if(r=t.type._context,l=t.pendingProps,o=t.memoizedProps,a=l.value,A(bl,r._currentValue),r._currentValue=a,o!==null)if(Qe(o.value,a)){if(o.children===l.children&&!Se.current){t=pt(e,t,n);break e}}else for(o=t.child,o!==null&&(o.return=t);o!==null;){var i=o.dependencies;if(i!==null){a=o.child;for(var u=i.firstContext;u!==null;){if(u.context===r){if(o.tag===1){u=ut(-1,n&-n),u.tag=2;var f=o.updateQueue;if(f!==null){f=f.shared;var h=f.pending;h===null?u.next=u:(u.next=h.next,h.next=u),f.pending=u}}o.lanes|=n,u=o.alternate,u!==null&&(u.lanes|=n),Pa(o.return,n,t),i.lanes|=n;break}u=u.next}}else if(o.tag===10)a=o.type===t.type?null:o.child;else if(o.tag===18){if(a=o.return,a===null)throw Error(w(341));a.lanes|=n,i=a.alternate,i!==null&&(i.lanes|=n),Pa(a,n,t),a=o.sibling}else a=o.child;if(a!==null)a.return=o;else for(a=o;a!==null;){if(a===t){a=null;break}if(o=a.sibling,o!==null){o.return=a.return,a=o;break}a=a.return}o=a}he(e,t,l.children,n),t=t.child}return t;case 9:return l=t.type,r=t.pendingProps.children,jn(t,n),l=De(l),r=r(l),t.flags|=1,he(e,t,r,n),t.child;case 14:return r=t.type,l=He(r,t.pendingProps),l=He(r.type,l),ru(e,t,r,l,n);case 15:return vd(e,t,t.type,t.pendingProps,n);case 17:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:He(r,l),dl(e,t),t.tag=1,ke(r)?(e=!0,Cl(t)):e=!1,jn(t,n),pd(t,r,l),Ma(t,r,l,n),Ta(null,t,r,!0,e,n);case 19:return Sd(e,t,n);case 22:return yd(e,t,n)}throw Error(w(156,t.tag))};function Id(e,t){return cc(e,t)}function Mm(e,t,n,r){this.tag=e,this.key=n,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=t,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.subtreeFlags=this.flags=0,this.deletions=null,this.childLanes=this.lanes=0,this.alternate=null}function Fe(e,t,n,r){return new Mm(e,t,n,r)}function Os(e){return e=e.prototype,!(!e||!e.isReactComponent)}function zm(e){if(typeof e=="function")return Os(e)?1:0;if(e!=null){if(e=e.$$typeof,e===es)return 11;if(e===ts)return 14}return 2}function zt(e,t){var n=e.alternate;return n===null?(n=Fe(e.tag,t,e.key,e.mode),n.elementType=e.elementType,n.type=e.type,n.stateNode=e.stateNode,n.alternate=e,e.alternate=n):(n.pendingProps=t,n.type=e.type,n.flags=0,n.subtreeFlags=0,n.deletions=null),n.flags=e.flags&14680064,n.childLanes=e.childLanes,n.lanes=e.lanes,n.child=e.child,n.memoizedProps=e.memoizedProps,n.memoizedState=e.memoizedState,n.updateQueue=e.updateQueue,t=e.dependencies,n.dependencies=t===null?null:{lanes:t.lanes,firstContext:t.firstContext},n.sibling=e.sibling,n.index=e.index,n.ref=e.ref,n}function ml(e,t,n,r,l,o){var a=2;if(r=e,typeof 
e=="function")Os(e)&&(a=1);else if(typeof e=="string")a=5;else e:switch(e){case un:return Qt(n.children,l,o,t);case qa:a=8,l|=8;break;case ea:return e=Fe(12,n,t,l|2),e.elementType=ea,e.lanes=o,e;case ta:return e=Fe(13,n,t,l),e.elementType=ta,e.lanes=o,e;case na:return e=Fe(19,n,t,l),e.elementType=na,e.lanes=o,e;case Ku:return Zl(n,l,o,t);default:if(typeof e=="object"&&e!==null)switch(e.$$typeof){case Wu:a=10;break e;case Qu:a=9;break e;case es:a=11;break e;case ts:a=14;break e;case gt:a=16,r=null;break e}throw Error(w(130,e==null?e:typeof e,""))}return t=Fe(a,n,t,l),t.elementType=e,t.type=r,t.lanes=o,t}function Qt(e,t,n,r){return e=Fe(7,e,r,t),e.lanes=n,e}function Zl(e,t,n,r){return e=Fe(22,e,r,t),e.elementType=Ku,e.lanes=n,e.stateNode={isHidden:!1},e}function Bo(e,t,n){return e=Fe(6,e,null,t),e.lanes=n,e}function Vo(e,t,n){return t=Fe(4,e.children!==null?e.children:[],e.key,t),t.lanes=n,t.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},t}function Rm(e,t,n,r,l){this.tag=t,this.containerInfo=e,this.finishedWork=this.pingCache=this.current=this.pendingChildren=null,this.timeoutHandle=-1,this.callbackNode=this.pendingContext=this.context=null,this.callbackPriority=0,this.eventTimes=jo(0),this.expirationTimes=jo(-1),this.entangledLanes=this.finishedLanes=this.mutableReadLanes=this.expiredLanes=this.pingedLanes=this.suspendedLanes=this.pendingLanes=0,this.entanglements=jo(0),this.identifierPrefix=r,this.onRecoverableError=l,this.mutableSourceEagerHydrationData=null}function Fs(e,t,n,r,l,o,a,i,u){return e=new Rm(e,t,n,i,u),t===1?(t=1,o===!0&&(t|=8)):t=0,o=Fe(3,null,null,t),e.current=o,o.stateNode=e,o.memoizedState={element:r,isDehydrated:n,cache:null,transitions:null,pendingSuspenseBoundaries:null},xs(o),e}function Tm(e,t,n){var r=3"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(Hd)}catch(e){console.error(e)}}Hd(),Hu.exports=Me;var Dm=Hu.exports,yu=Dm;Zo.createRoot=yu.createRoot,Zo.hydrateRoot=yu.hydrateRoot;/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Um=e=>e.replace(/([a-z0-9])([A-Z])/g,"$1-$2").toLowerCase(),Ad=(...e)=>e.filter((t,n,r)=>!!t&&t.trim()!==""&&r.indexOf(t)===n).join(" ").trim();/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */var $m={xmlns:"http://www.w3.org/2000/svg",width:24,height:24,viewBox:"0 0 24 24",fill:"none",stroke:"currentColor",strokeWidth:2,strokeLinecap:"round",strokeLinejoin:"round"};/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Hm=S.forwardRef(({color:e="currentColor",size:t=24,strokeWidth:n=2,absoluteStrokeWidth:r,className:l="",children:o,iconNode:a,...i},u)=>S.createElement("svg",{ref:u,...$m,width:t,height:t,stroke:e,strokeWidth:r?Number(n)*24/Number(t):n,className:Ad("lucide",l),...i},[...a.map(([f,h])=>S.createElement(f,h)),...Array.isArray(o)?o:[o]]));/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. 
+ */const Re=(e,t)=>{const n=S.forwardRef(({className:r,...l},o)=>S.createElement(Hm,{ref:o,iconNode:t,className:Ad(`lucide-${Um(e)}`,r),...l}));return n.displayName=`${e}`,n};/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Gn=Re("ChevronDown",[["path",{d:"m6 9 6 6 6-6",key:"qrunsl"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Am=Re("Database",[["ellipse",{cx:"12",cy:"5",rx:"9",ry:"3",key:"msslwz"}],["path",{d:"M3 5V19A9 3 0 0 0 21 19V5",key:"1wlel7"}],["path",{d:"M3 12A9 3 0 0 0 21 12",key:"mv7ke4"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Bm=Re("FlaskConical",[["path",{d:"M10 2v7.527a2 2 0 0 1-.211.896L4.72 20.55a1 1 0 0 0 .9 1.45h12.76a1 1 0 0 0 .9-1.45l-5.069-10.127A2 2 0 0 1 14 9.527V2",key:"pzvekw"}],["path",{d:"M8.5 2h7",key:"csnxdl"}],["path",{d:"M7 16h10",key:"wp8him"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const gu=Re("Grid3x3",[["rect",{width:"18",height:"18",x:"3",y:"3",rx:"2",key:"afitv7"}],["path",{d:"M3 9h18",key:"1pudct"}],["path",{d:"M3 15h18",key:"5xshup"}],["path",{d:"M9 3v18",key:"fh3hqa"}],["path",{d:"M15 3v18",key:"14nvp0"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Vm=Re("History",[["path",{d:"M3 12a9 9 0 1 0 9-9 9.75 9.75 0 0 0-6.74 2.74L3 8",key:"1357e3"}],["path",{d:"M3 3v5h5",key:"1xhq8a"}],["path",{d:"M12 7v5l4 2",key:"1fdv2h"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Wm=Re("House",[["path",{d:"M15 21v-8a1 1 0 0 0-1-1h-4a1 1 0 0 0-1 1v8",key:"5wwlr5"}],["path",{d:"M3 10a2 2 0 0 1 .709-1.528l7-5.999a2 2 0 0 1 2.582 0l7 5.999A2 2 0 0 1 21 10v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2z",key:"1d0kgt"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Wo=Re("LoaderCircle",[["path",{d:"M21 12a9 9 0 1 1-6.219-8.56",key:"13zald"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Qm=Re("RefreshCw",[["path",{d:"M3 12a9 9 0 0 1 9-9 9.75 9.75 0 0 1 6.74 2.74L21 8",key:"v9h5vc"}],["path",{d:"M21 3v5h-5",key:"1q7to0"}],["path",{d:"M21 12a9 9 0 0 1-9 9 9.75 9.75 0 0 1-6.74-2.74L3 16",key:"3uifl3"}],["path",{d:"M8 16H3v5",key:"1cv678"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const xu=Re("RotateCcw",[["path",{d:"M3 12a9 9 0 1 0 9-9 9.75 9.75 0 0 0-6.74 2.74L3 8",key:"1357e3"}],["path",{d:"M3 3v5h5",key:"1xhq8a"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. 
+ * See the LICENSE file in the root directory of this source tree. + */const wu=Re("Trash2",[["path",{d:"M3 6h18",key:"d0wm0j"}],["path",{d:"M19 6v14c0 1-1 2-2 2H7c-1 0-2-1-2-2V6",key:"4alrt4"}],["path",{d:"M8 6V4c0-1 1-2 2-2h4c1 0 2 1 2 2v2",key:"v07s0e"}],["line",{x1:"10",x2:"10",y1:"11",y2:"17",key:"1uufr5"}],["line",{x1:"14",x2:"14",y1:"11",y2:"17",key:"xtxkd"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Km=Re("Upload",[["path",{d:"M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4",key:"ih7n3h"}],["polyline",{points:"17 8 12 3 7 8",key:"t8dd8p"}],["line",{x1:"12",x2:"12",y1:"3",y2:"15",key:"widbto"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const Su=Re("ZoomIn",[["circle",{cx:"11",cy:"11",r:"8",key:"4ej97u"}],["line",{x1:"21",x2:"16.65",y1:"21",y2:"16.65",key:"13gj7c"}],["line",{x1:"11",x2:"11",y1:"8",y2:"14",key:"1vmskp"}],["line",{x1:"8",x2:"14",y1:"11",y2:"11",key:"durymu"}]]);/** + * @license lucide-react v0.460.0 - ISC + * + * This source code is licensed under the ISC license. + * See the LICENSE file in the root directory of this source tree. + */const ku=Re("ZoomOut",[["circle",{cx:"11",cy:"11",r:"8",key:"4ej97u"}],["line",{x1:"21",x2:"16.65",y1:"21",y2:"16.65",key:"13gj7c"}],["line",{x1:"8",x2:"14",y1:"11",y2:"11",key:"durymu"}]]),Z="/api";async function ht(e){if((e.headers.get("content-type")||"").includes("application/json"))return e.json();const n=await e.text();throw new Error(n||`HTTP ${e.status}`)}async function Gm(){const e=await fetch(`${Z}/analysis/uploads`);if(!e.ok)throw new Error(await e.text());return e.json()}async function Ym(e,t){const n=new URLSearchParams;t!=null&&t.deleteExperiments&&n.set("delete_experiments","true");const r=n.toString(),l=await fetch(`${Z}/analysis/uploads/${encodeURIComponent(e)}${r?`?${r}`:""}`,{method:"DELETE"});if(!l.ok)throw new Error(await l.text());return await l.json().catch(()=>({}))}async function Xm(e,t="analysis_upload"){const n=new FormData;n.append("file",e),n.append("source",t);const r=await fetch(`${Z}/analysis/upload`,{method:"POST",body:n}),l=await ht(r);if(!r.ok)throw new Error(String(l.detail||r.status));return l}async function Jm(e){const t=await fetch(`${Z}/analysis/uploads/import_from_debug`,{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({filename:e})}),n=await ht(t);if(!t.ok)throw new Error(String(n.detail||t.status));return n}async function Zm(e,t){const n=await fetch(`${Z}/analysis/solve/image`,{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({input_name:e,...t})}),r=await ht(n);if(!n.ok)throw new Error(String(r.detail||n.status));return r}async function qm(e,t){const n=await fetch(`${Z}/analysis/solve/batch`,{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({input_name:e,runs:t})}),r=await ht(n);if(!n.ok)throw new Error(String(r.detail||n.status));return r}async function Qo(e){const t=await fetch(`${Z}/analysis/presets?scope=${e}`);if(!t.ok)throw new Error(await t.text());return t.json()}async function eh(e,t){const n=await fetch(`${Z}/analysis/presets`,{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({name:e,params:t})}),r=await ht(n);if(!n.ok)throw new Error(String(r.detail||n.status));return r}async function Ko(e){const 
t=await fetch(`${Z}/analysis/experiments`,{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify(e)}),n=await ht(t);if(!t.ok)throw new Error(String(n.detail||t.status));return n}async function th(e){const t=await fetch(`${Z}/analysis/experiments/${encodeURIComponent(e)}`,{method:"DELETE"});if(!t.ok)throw new Error(await t.text())}async function Go(e,t,n=30){const r=new URLSearchParams({page:String(t),page_size:String(n)});e&&r.set("q",e);const l=await fetch(`${Z}/analysis/experiments?${r}`);if(!l.ok)throw new Error(await l.text());return l.json()}async function nh(){const e=await fetch(`${Z}/debug/files`);if(!e.ok)throw new Error(await e.text());return e.json()}function rh(e){return`${Z}/debug/files/${encodeURIComponent(e)}`}async function Nu(e){const t=await fetch(`${Z}/debug/files/${encodeURIComponent(e)}/info`),n=await ht(t);if(!t.ok)throw new Error(String(n.detail||t.status));return n}async function lh(e){const t=await fetch(`${Z}/analysis/uploads/${encodeURIComponent(e)}/info`),n=await ht(t);if(!t.ok)throw new Error(String(n.detail||t.status));return n}function oh(e){return`${Z}/analysis/uploads/file?filename=${encodeURIComponent(e)}`}async function ju(e){const t=await fetch(`${Z}/analysis/experiments/export?format=${e}`);if(!t.ok)throw new Error(await t.text());return t.text()}async function ah(e){const t=await fetch(`${Z}/analysis/uploads/${encodeURIComponent(e)}/experiment_count`);if(!t.ok)throw new Error(await t.text());return t.json()}async function _u(e){const t=await fetch(`${Z}/analysis/solve/frame`,{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify(e)}),n=await ht(t);if(!t.ok)throw new Error(String(n.detail||t.status));return n}function sh(e){return`${Z}/analysis/experiments/${encodeURIComponent(e)}/asset`}async function ih(){const e=await fetch(`${Z}/system/info`);if(!e.ok)throw new Error(await e.text());return e.json()}function Bd(e,t,n,r,l){if(r){if(e.clearRect(0,0,t,n),l.all&&Array.isArray(r.stars_all_centroids)){e.fillStyle="rgba(156, 163, 175, 0.85)";for(const o of r.stars_all_centroids)e.beginPath(),e.arc(o.x,o.y,2.4,0,Math.PI*2),e.fill()}if(l.pattern&&Array.isArray(r.stars_pattern)){e.strokeStyle="rgba(251, 146, 60, 0.95)",e.lineWidth=2;for(const o of r.stars_pattern)e.beginPath(),e.arc(o.x,o.y,6,0,Math.PI*2),e.stroke()}if(l.matched&&Array.isArray(r.stars_matched)){e.strokeStyle="rgba(34, 197, 94, 0.95)",e.fillStyle="rgba(34, 197, 94, 0.95)",e.lineWidth=2,e.font="11px system-ui, sans-serif";for(const o of r.stars_matched)e.beginPath(),e.arc(o.x,o.y,7,0,Math.PI*2),e.stroke(),o.mag!=null&&e.fillText(`m${Number(o.mag).toFixed(1)}`,o.x+4,o.y-4)}}}function Cu(e,t,n,r){if(!n)return;const l=t.naturalWidth||1,o=t.naturalHeight||1;e.width=l,e.height=o,e.style.width=`${t.clientWidth}px`,e.style.height=`${t.clientHeight}px`;const a=e.getContext("2d");a&&Bd(a,l,o,n,r)}function uh(e,t,n,r){if(!n)return;const l=t.videoWidth||1,o=t.videoHeight||1;if(l<2||o<2)return;e.width=l,e.height=o,e.style.width=`${t.clientWidth}px`,e.style.height=`${t.clientHeight}px`;const a=e.getContext("2d");a&&Bd(a,l,o,n,r)}const ch={"app.title":"OGScope Plate Solve Console","nav.lab":"Lab","nav.labImage":"Image solve","nav.labVideo":"Video solve","delete.uploadCascade":"Delete {n} linked experiment record(s) as well?","lab.solveCurrentFrame":"Solve current frame (file)","lab.cameraPreviewLoading":"Connecting to shared preview…","lab.solveCameraFrame":"Solve live camera frame","lab.solveCameraStart":"Start camera solve","lab.solveCameraStop":"Stop camera 
solve","lab.videoPreviewFailed":"This video format may be unsupported by the browser. Try MP4 (H.264) or WebM.","lab.previewModeFile":"Pool file","lab.previewModeCamera":"Device camera","lab.videoLiveIntro":"Shares the same camera as Camera Debug — preview and solve live frames here without opening debug. Both pages can run together.","lab.cameraSnapshotName":"ogscope_camera_live","lab.metric.probRaw":"Raw Prob","lab.systemLoad":"System load","results.saveBatchAll":"Save all to records","nav.pool":"Assets","nav.history":"Records","nav.cameraDebug":"Camera Debug","nav.home":"Home","lang.zh":"中文","lang.en":"EN","sidebar.assets":"Uploaded assets","sidebar.upload":"Upload","sidebar.refresh":"Refresh","sidebar.debugCaptures":"Debug console media","sidebar.assetTypeImage":"Image","sidebar.assetTypeVideo":"Video","sidebar.debugEmpty":"No debug files","sidebar.importToPool":"Import to pool","sidebar.debugPage":"Page {cur} / {total}","sidebar.batchPresets":"Batch presets","sidebar.batchHint":"Check presets, then use Batch solve to compare multiple param sets.","lab.selectOrUpload":"Pick an uploaded or imported asset from the left","lab.selectOrUploadVideo":"Pick a pool video to preview, or use the button above for the live camera frame.","lab.file":"File","lab.source":"Source","lab.layers":"Layers","lab.layer.matched":"Matched","lab.layer.pattern":"Pattern","lab.layer.all":"All centroids","lab.grid":"Grid","lab.zoomIn":"Zoom in","lab.zoomOut":"Zoom out","lab.zoomReset":"Reset","lab.resolution":"Resolution","lab.fwhm":"FWHM","lab.starsDetected":"Stars detected","lab.meta.title":"Capture & file info","lab.meta.noSidecar":"No sidecar (not from debug capture)","lab.meta.partial":"No detailed sidecar; file info only.","lab.solveSection":"Solve","lab.imageSection":"Image","lab.metric.solveMs":"Time","lab.metric.solveComputeMs":"Solve compute","lab.metric.solveComputeHelp":"Server-side Tetra3 + star extraction only (no network).","lab.metric.solveRoundTripMs":"End-to-end","lab.metric.solveRoundTripHelp":"From request start to UI updated: network + JSON + render.","lab.metric.backendTotalMs":"Backend total","lab.metric.openDecodeMs":"Open/decode","lab.metric.preprocessMs":"Preprocess","lab.metric.extractMs":"Extract","lab.metric.solveOnlyMs":"Solve match","lab.metric.probHelp":"Solver confidence (0–1); higher means a more trustworthy plate match.","lab.metric.probRawHelp":"Raw Tetra3 Prob (e.g. log-likelihood); compare with the normalized line above.","lab.metric.radec":"RA / Dec","lab.metric.matches":"Matches","lab.metric.rmse":"RMSE","lab.metric.prob":"Prob.","lab.metric.status":"Status","meta.exposure":"Exposure","meta.gain":"Gain","meta.fps":"FPS","meta.sensor":"Sensor","meta.colorMode":"Color","meta.outputResolution":"Output size","meta.fileTime":"File time","meta.fileSize":"File size","results.viewRaw":"Raw JSON","results.hideRaw":"Hide","params.title":"Solve parameters","params.blockSolveIntro":"Plate-solve (Tetra3): FOV, timeout, and coarse sky hints. 
FOV should match your lens.","params.centroid":"Star detection","params.blockCentroidIntro":"Star detection: threshold, blob area, and local background window for centroids.","params.fov":"FOV estimate (°)","params.fovHelp":"Horizontal field of view for lost-in-space solve.","params.fovErr":"FOV max error (°)","params.fovErrHelp":"Search range around estimated FOV.","params.timeout":"Timeout (ms)","params.timeoutHelp":"Max wait time per solve.","params.solveProfile":"Solve profile","params.solveProfileHelp":"Speed/Balanced/Robust tune timeout, centroid thresholds, and matching star count together.","params.solveProfileSpeed":"Speed first","params.solveProfileBalanced":"Balanced","params.solveProfileRobust":"Robust first","params.ra":"RA hint (°)","params.raHelp":"Approximate right ascension in degrees.","params.dec":"Dec hint (°)","params.decHelp":"Approximate declination in degrees.","params.maxSide":"Max long side before extract (px)","params.maxSideHelp":"Downscale long edge for faster centroid extraction.","params.detailLevelFull":"Include full Tetra3 raw block (larger payload, for debugging only).","params.largeScaleBg":"Large-scale background flattening","params.largeScaleBgHelp":"Before centroiding, estimate a low-frequency background on a downscaled image and correct uneven illumination (e.g. corner glow). Off by default to match legacy behavior.","params.sigma":"σ threshold","params.sigmaHelp":"Multiplier over background noise for star candidates.","params.maxArea":"max_area","params.maxAreaHelp":"Max connected component area in pixels.","params.minArea":"min_area","params.minAreaHelp":"Min connected component area in pixels.","params.filtsize":"filtsize (odd)","params.filtsizeHelp":"Local filter window size, must be odd.","btn.solveOne":"Solve once","btn.solveBatch":"Batch solve (presets)","btn.applyPresets":"Apply preset to form","btn.savePreset":"Save","placeholder.newPreset":"New preset name","pool.title":"Server asset pool","pool.col.name":"Filename","pool.col.source":"Source","pool.col.size":"Size","pool.col.time":"Modified","pool.delete":"Delete","history.title":"Experiment records","history.intro":"Saved solve snapshots from the Lab. After a solve, use Save to records in the Lab main panel (Result comparison), or Save on each batch result card. Search by filename or preset; export JSON/CSV for backup.","history.search":"Search…","history.searchBtn":"Search","history.exportJson":"Export JSON","history.exportCsv":"Export CSV","history.total":"Total {n}","history.preset":"Preset","history.metrics":"Metrics","history.detail":"Details","history.collapse":"Collapse","history.prev":"Prev","history.next":"Next","history.delete":"Delete","delete.uploadFirst":'Delete "{name}" from the asset pool?',"delete.uploadSecond":"This cannot be undone. Confirm again?","delete.experimentFirst":"Delete this experiment record?","delete.experimentSecond":"This cannot be undone. 
Confirm again?","results.title":"Results","results.saveCurrent":"Save to records","results.saveRow":"Save","results.expand":"Expand","results.collapseJson":"Collapse","err.selectFile":"Select a file","err.selectPresets":"Select at least one preset","common.placeholder":"—"},dh={"app.title":"OGScope 星空解算控制台","nav.lab":"解算台","nav.labImage":"图片解算","nav.labVideo":"视频解算","nav.pool":"素材池","nav.history":"实验记录","nav.cameraDebug":"相机调试控制台","nav.home":"首页","lang.zh":"中文","lang.en":"EN","sidebar.assets":"自行上传素材","sidebar.upload":"上传文件","sidebar.refresh":"刷新列表","sidebar.debugCaptures":"调试控制台素材","sidebar.assetTypeImage":"图片","sidebar.assetTypeVideo":"视频","sidebar.debugEmpty":"暂无调试文件","sidebar.importToPool":"导入到素材池","sidebar.debugPage":"第 {cur} / {total} 页","sidebar.batchPresets":"批量预设","sidebar.batchHint":"勾选后点击「批量解算」可一次用多组参数对比结果。","lab.selectOrUpload":"从左侧选择自行上传或已导入的素材","lab.selectOrUploadVideo":"从左侧选择视频素材预览;或使用上方按钮解算设备相机实时帧。","lab.file":"文件","lab.source":"来源","lab.layers":"叠加层","lab.layer.matched":"匹配星","lab.layer.pattern":"图案星","lab.layer.all":"全部质心","lab.grid":"网格","lab.zoomIn":"放大","lab.zoomOut":"缩小","lab.zoomReset":"复位","lab.resolution":"分辨率","lab.fwhm":"FWHM","lab.starsDetected":"检测星点","lab.meta.title":"拍摄与文件信息","lab.meta.noSidecar":"无侧车信息(非调试采集或仅本地上传)","lab.meta.partial":"暂无侧车详细字段,仅显示文件信息。","lab.solveSection":"解算","lab.imageSection":"图像","lab.metric.solveMs":"用时","lab.metric.solveComputeMs":"解算计算用时","lab.metric.solveComputeHelp":"服务端 Tetra3 与提星等纯计算耗时(与网络无关)。","lab.metric.solveRoundTripMs":"全链路用时","lab.metric.solveRoundTripHelp":"从本页发起请求到收到结果并完成界面刷新的总耗时,含网络往返与浏览器渲染。","lab.metric.backendTotalMs":"后端总用时","lab.metric.openDecodeMs":"读取/解码","lab.metric.preprocessMs":"预处理","lab.metric.extractMs":"提星","lab.metric.solveOnlyMs":"匹配解算","lab.metric.probHelp":"由解算器给出的匹配置信度(0–1),越高表示星图与天区匹配越可信。","lab.metric.probRawHelp":"Tetra3 返回的原始 Prob 字段,可能为对数似然等内部量;与上一行换算后的百分比对照查看即可。","lab.metric.radec":"RA / Dec","lab.metric.matches":"匹配","lab.metric.rmse":"RMSE","lab.metric.prob":"置信","lab.metric.status":"状态","meta.exposure":"曝光","meta.gain":"增益","meta.fps":"帧率","meta.sensor":"传感器","meta.colorMode":"色彩","meta.outputResolution":"输出分辨率","meta.fileTime":"文件时间","meta.fileSize":"文件大小","results.viewRaw":"原始 JSON","results.hideRaw":"收起","params.title":"解算参数","params.blockSolveIntro":"以下为板块求解(Tetra3)搜索天区、超时与粗略指向提示;FOV 需与镜头视场大致一致。","params.centroid":"提星","params.blockCentroidIntro":"以下为星点检测:阈值、连通域面积与局部背景窗口,用于从图像中提取星点质心。","params.fov":"FOV 估计 (°)","params.fovHelp":"水平视场角估计值,用于 lost-in-space 解算。","params.fovErr":"FOV 允许误差 (°)","params.fovErrHelp":"允许 Tetra3 在估计 FOV 附近的搜索范围。","params.timeout":"超时 (ms)","params.timeoutHelp":"单次解算最长等待时间。","params.solveProfile":"解算档位","params.solveProfileHelp":"速度/平衡/稳健三档会同时调整超时、提星阈值与参与匹配星点数。","params.solveProfileSpeed":"速度优先","params.solveProfileBalanced":"平衡","params.solveProfileRobust":"稳健优先","params.ra":"RA 提示 (°)","params.raHelp":"大致天球赤经,缩小搜索范围(度)。","params.dec":"Dec 提示 (°)","params.decHelp":"大致天球赤纬(度)。","params.maxSide":"提星前长边上界 (px)","params.maxSideHelp":"降采样长边上限,大图可加速提星。","params.detailLevelFull":"包含完整 Tetra3 
原始结果(体积略大,仅调试时开启)","params.largeScaleBg":"大尺度背景减除","params.largeScaleBgHelp":"在提星前用低分辨率平滑估计并校正大尺度亮度不均,可减轻角部光晕导致的假星;默认关闭以保持与过往行为一致。","params.sigma":"σ(阈值倍数)","params.sigmaHelp":"高于背景噪声倍数的区域视为星点候选。","params.maxArea":"max_area","params.maxAreaHelp":"连通域最大像素面积。","params.minArea":"min_area","params.minAreaHelp":"连通域最小像素面积。","params.filtsize":"filtsize(奇数)","params.filtsizeHelp":"局部背景滤波窗口边长,须为奇数。","btn.solveOne":"单张解算","btn.solveBatch":"批量解算(勾选预设)","btn.applyPresets":"应用预设到表单","btn.savePreset":"保存","placeholder.newPreset":"新预设名称","pool.title":"服务器素材池","pool.col.name":"文件名","pool.col.source":"来源","pool.col.size":"大小","pool.col.time":"修改时间","pool.delete":"删除","history.title":"实验记录","history.intro":"此处展示你在解算台完成解算后手动保存的快照。用法:在「解算台」主栏「结果对比」中,单张解算后点「保存当前到实验记录」,或批量解算后在某张结果卡片上点「保存记录」。本页可按文件名或预设名搜索,支持导出 JSON/CSV 备份。","history.search":"搜索…","history.searchBtn":"搜索","history.exportJson":"导出 JSON","history.exportCsv":"导出 CSV","history.total":"共 {n} 条","history.preset":"预设","history.metrics":"指标","history.detail":"详情","history.collapse":"收起","history.prev":"上一页","history.next":"下一页","history.delete":"删除","delete.uploadFirst":"确定要从素材池删除「{name}」吗?","delete.uploadSecond":"此操作不可恢复,再次确认删除?","delete.experimentFirst":"确定要删除这条实验记录吗?","delete.experimentSecond":"此操作不可恢复,再次确认删除?","results.title":"结果对比","results.saveCurrent":"保存当前到实验记录","results.saveRow":"保存记录","results.expand":"展开","results.collapseJson":"收起","err.selectFile":"请选择素材","err.selectPresets":"请勾选至少一个预设","common.placeholder":"—","delete.uploadCascade":"该素材有 {n} 条实验记录,是否一并删除?","lab.solveCurrentFrame":"解算当前帧(文件)","lab.cameraPreviewLoading":"正在连接共享预览…","lab.solveCameraFrame":"解算相机当前帧","lab.solveCameraStart":"开始相机解算","lab.solveCameraStop":"停止相机解算","lab.videoPreviewFailed":"该视频格式可能不受浏览器支持,建议使用 MP4(H.264) 或 WebM。","lab.previewModeFile":"素材文件","lab.previewModeCamera":"设备相机","lab.videoLiveIntro":"与调试控制台共用同一相机;此处可预览并解算实时帧,无需单独打开调试页。两页可同时使用。","lab.cameraSnapshotName":"ogscope_camera_live","lab.metric.probRaw":"原始 Prob","lab.systemLoad":"系统负载","results.saveBatchAll":"保存全部到实验记录"},Vd=S.createContext(null),Eu={zh:dh,en:ch};function fh({children:e}){const[t,n]=S.useState("zh"),[r,l]=S.useState(Eu.zh);S.useEffect(()=>{l(Eu[t])},[t]);const o=S.useMemo(()=>(i,u)=>{let f=r[i]??i;if(u)for(const[h,y]of Object.entries(u))f=f.replace(new RegExp(`\\{${h}\\}`,"g"),String(y));return f},[r]),a=S.useMemo(()=>({locale:t,setLocale:n,t:o}),[t,o]);return s.jsx(Vd.Provider,{value:a,children:e})}function Wd(){const e=S.useContext(Vd);if(!e)throw new Error("useI18n must be used within I18nProvider");return e}function Wa(e){return e==null||Number.isNaN(e)?"—":e<1024?`${e} B`:e<1024*1024?`${(e/1024).toFixed(1)} KB`:`${(e/(1024*1024)).toFixed(2)} MB`}function hl(e,t){if(!e)return"—";try{const n=new Date(e);return new Intl.DateTimeFormat(t==="en"?"en-GB":"zh-CN",{dateStyle:"short",timeStyle:"medium"}).format(n)}catch{return e}}function Ut(e){return e==null?null:typeof e=="string"&&e.trim()?e.trim():typeof e=="number"&&!Number.isNaN(e)?String(e):null}function ph(e){return typeof e!="number"||e<=0?null:e>=1e6?`${(e/1e6).toFixed(2)} s`:e>=1e3?`${(e/1e3).toFixed(0)} ms`:`${e} µs`}function mh(e,t){if(!e)return[];const n=[],r=ph(e.exposure_us);r&&n.push({key:"meta.exposure",value:r});const l=Ut(e.analogue_gain),o=Ut(e.digital_gain);if(l||o){const v=[l?`A ${l}`:"",o?`D ${o}`:""].filter(Boolean);n.push({key:"meta.gain",value:v.join(" · ")})}const a=Ut(e.fps);a&&n.push({key:"meta.fps",value:a});const i=Ut(e.sensor);i&&n.push({key:"meta.sensor",value:i});const 
u=Ut(e.color_mode);u&&n.push({key:"meta.colorMode",value:u});const f=Ut(e.resolution);f&&n.push({key:"meta.outputResolution",value:f});const h=Ut(e.modified);h&&n.push({key:"meta.fileTime",value:hl(h,t)});const y=e.size;return typeof y=="number"&&y>0&&n.push({key:"meta.fileSize",value:Wa(y)}),n}function Qd(e){const t=n=>typeof n=="number"&&!Number.isNaN(n)?n:null;return e?{tSolveMs:t(e.t_solve_ms),tExtractMs:t(e.t_extract_ms),tPreprocessMs:t(e.t_preprocess_ms),tOpenDecodeMs:t(e.t_open_decode_ms),tBackendTotalMs:t(e.t_backend_total_ms),raDeg:t(e.ra_deg),decDeg:t(e.dec_deg),matches:t(e.matches),rmseArcsec:t(e.rmse_arcsec),prob:t(e.prob),status:typeof e.status=="string"?e.status:null}:{tSolveMs:null,tExtractMs:null,tPreprocessMs:null,tOpenDecodeMs:null,tBackendTotalMs:null,raDeg:null,decDeg:null,matches:null,rmseArcsec:null,prob:null,status:null}}function $l(e){return e==null?"—":`${e.toFixed(4)}°`}function hh(e){return e==null?"—":e>=0&&e<=1?`${(e*100).toFixed(1)}%`:String(e)}function vh(e){if(e==null)return"—";if(typeof e=="number"&&!Number.isNaN(e)){const t=Math.abs(e);return t>0&&t<.001?e.toExponential(4):t>=0&&t<=1?`${(e*100).toFixed(4)}%`:String(e)}return String(e)}function Cn(e,t){const n=t==null?void 0:t.tetra,r=n?n.Prob??n.prob:void 0;let l=hh(e);e!=null&&e>=0&&e<=1&&e>0&&e<1e-4&&(l=`${(e*100).toExponential(2)}%`),e===0&&r!==void 0&&r!==null&&(l="—");const o=r!=null?vh(r):null;return{line:l,rawLine:o}}const Yo=()=>({hint_ra_deg:45,hint_dec_deg:80,fov_estimate:11,fov_max_error:void 0,solve_timeout_ms:1500,solve_profile:"balanced",max_image_side:1600,large_scale_bg_subtract:!1,detail_level:"summary",centroid:{sigma:2.5,max_area:400,min_area:5,filtsize:25,binary_open:!0,max_axis_ratio:void 0}});function Xo(e){return e?{matches:e.matches,rmse_arcsec:e.rmse_arcsec,status:e.status,prob:e.prob,t_solve_ms:e.t_solve_ms}:{}}function yh(e){var n,r;if(!e)return null;const t=e.solve_overlay;return(n=t==null?void 0:t.stars_matched)!=null&&n.length?t.stars_matched.length:(r=t==null?void 0:t.stars_all_centroids)!=null&&r.length?t.stars_all_centroids.length:typeof e.matches=="number"?e.matches:null}const nl=30,Jo=6;function Pu(e){return/\.(jpe?g|png|webp|bmp|gif|fits?)$/i.test(e)}function an(e){return/\.(mp4|mov|webm|mkv|avi)$/i.test(e)}function gh(){var ei,ti,ni,ri,li,oi,ai,si;const{t:e,locale:t,setLocale:n}=Wd(),[r,l]=S.useState("lab_image"),[o,a]=S.useState([]),[i,u]=S.useState(null),[f,h]=S.useState(Yo),[y,v]=S.useState([]),[N,j]=S.useState([]),[_,$]=S.useState(new Set),[p,d]=S.useState(!1),[m,g]=S.useState(null),[k,b]=S.useState(null),[C,M]=S.useState(null),[U,L]=S.useState(""),[ne,et]=S.useState(1),[me,en]=S.useState(null),[Mr,tn]=S.useState(null),[It,E]=S.useState(""),[R,T]=S.useState({matched:!0,pattern:!0,all:!0}),[H,X]=S.useState([]),[je,Ke]=S.useState(null),[tt,_e]=S.useState(1),[nt,$s]=S.useState(!1),[nn,ro]=S.useState(1),[Hs,Kd]=S.useState({w:0,h:0}),[As,Gd]=S.useState({w:0,h:0}),[Bs,Yd]=S.useState({w:0,h:0}),[re,zr]=S.useState("file"),[lo,Vs]=S.useState(null),[Ws,oo]=S.useState(null),[ao,Qs]=S.useState(!1),[so,Rr]=S.useState(null),[io,Ks]=S.useState(!1),[Xd,uo]=S.useState({}),[Gs,co]=S.useState(!1),[fo,rt]=S.useState(null),[Jd,In]=S.useState(null),po=S.useRef(null),Dn=S.useRef(null),Ys=S.useRef(null),Tr=S.useRef(null),Lr=S.useRef(null),mo=S.useRef(!1),rn=S.useRef(null),[Or,Zd]=S.useState(null),Dt=S.useCallback(async()=>{const[c,x,z]=await 
Promise.all([Gm(),Qo("official"),Qo("user")]);a(c.files),v(x.presets),j(z.presets)},[]),ho=S.useCallback(()=>{nh().then(c=>X(c.files)).catch(()=>X([]))},[]);S.useEffect(()=>{Dt().catch(c=>g(String(c)))},[Dt]),S.useEffect(()=>{ho()},[ho]),S.useEffect(()=>{r==="history"&&Go(U,ne,nl).then(en).catch(c=>g(String(c)))},[r,U,ne]),S.useEffect(()=>{if(!i){Rr(null);return}let c=!1;return Ks(!0),(async()=>{try{const x=await lh(i);if(c)return;let z={...x};try{z={...await Nu(i),...x}}catch{}Rr(z)}catch{try{const x=await Nu(i);c||Rr(x)}catch{c||Rr(null)}}finally{c||Ks(!1)}})(),()=>{c=!0}},[i]),S.useEffect(()=>{b(null),M(null),uo({}),co(!1),rt(null),In(null),zr("file"),oo(null)},[i]);const lt=S.useMemo(()=>{const c=k==null?void 0:k.result;return c&&c.solve_overlay||null},[k]),vt=S.useMemo(()=>(k==null?void 0:k.result)??null,[k]),Xs=S.useMemo(()=>yh(vt),[vt]),vo=S.useMemo(()=>r==="lab_image"?Hs:r==="lab_video"?re==="file"?As:Bs:{w:0,h:0},[r,re,Hs,As,Bs]),Fr=i?oh(i):"";S.useEffect(()=>{if(r!=="lab_image")return;const c=po.current,x=rn.current;if(!c||!x||!lt||!i)return;const z=()=>Cu(x,c,lt,R);c.complete?z():c.onload=z},[lt,i,k,R,r]),S.useEffect(()=>{if(r!=="lab_video"||re!=="file")return;const c=Dn.current,x=rn.current;if(!c||!x||!lt||!i)return;const z=()=>uh(x,c,lt,R);return c.addEventListener("loadeddata",z),c.addEventListener("seeked",z),c.readyState>=2&&z(),()=>{c.removeEventListener("loadeddata",z),c.removeEventListener("seeked",z)}},[lt,R,r,re,i,k]),S.useEffect(()=>{if(r!=="lab_video"||re!=="camera")return;const c=Ys.current,x=rn.current;if(!c||!x||!lt)return;const z=()=>Cu(x,c,lt,R);c.complete?z():c.onload=z},[lt,R,r,re,k,lo]),S.useEffect(()=>{if(r!=="lab_video"||re!=="camera")return;let c=!1;const x=async()=>{if(!c)try{const le=Tr.current?`?since_frame_id=${encodeURIComponent(Tr.current)}`:"",Te=await fetch(`/api/camera/preview${le}`,{cache:"no-store"});if(Te.status===304||!Te.ok)return;const Dr=Te.headers.get("X-Frame-Id");Dr!=null&&(Tr.current=Dr);const uf=await Te.blob(),cf=URL.createObjectURL(uf);Vs(ii=>(ii&&URL.revokeObjectURL(ii),cf))}catch{}};x();const z=window.setInterval(()=>void x(),180);return()=>{c=!0,clearInterval(z),Vs(le=>(le&&URL.revokeObjectURL(le),null)),Tr.current=null}},[r,re]),S.useEffect(()=>{const c=po.current;if(!c)return;const x=()=>Kd({w:c.naturalWidth||0,h:c.naturalHeight||0});return x(),c.addEventListener("load",x),()=>c.removeEventListener("load",x)},[i,Fr]),S.useEffect(()=>{const c=Dn.current;if(!c)return;const x=()=>Gd({w:c.videoWidth||0,h:c.videoHeight||0});return c.addEventListener("loadedmetadata",x),c.addEventListener("loadeddata",x),x(),()=>{c.removeEventListener("loadedmetadata",x),c.removeEventListener("loadeddata",x)}},[i,Fr,r]);const qd=c=>{h({...Yo(),...c,centroid:{...Yo().centroid,...c.centroid}})},ef=async()=>{if(!i){g(e("err.selectFile"));return}g(null),d(!0);const c=performance.now();try{const x=await Zm(i,f);b(x),M(null),In("file"),rt(performance.now()-c)}catch(x){g(String(x)),rt(null)}finally{d(!1)}},tf=async()=>{if(!i){g(e("err.selectFile"));return}const c=[];for(const z of _){const Te=[...y,...N].find(Dr=>Dr.id===z);Te&&c.push({label:Te.name,params:structuredClone(Te.params)})}if(c.length===0){g(e("err.selectPresets"));return}g(null),d(!0);const x=performance.now();try{const z=await qm(i,c);M({results:z.results}),b(null),uo({}),In("file"),rt(performance.now()-x)}catch(z){g(String(z)),rt(null)}finally{d(!1)}},yo=async()=>{if(mo.current)return;g(null),d(!0),mo.current=!0;const c=performance.now();try{const x=await 
_u({source:"camera",...f});b(x),M(null),In("camera"),zr("camera"),rt(performance.now()-c)}catch(x){g(String(x)),rt(null)}finally{mo.current=!1,d(!1)}},nf=async()=>{if(!i)return;g(null),d(!0);const c=performance.now();try{const x=Dn.current,z=await _u({source:"file",input_name:i,time_sec:(x==null?void 0:x.currentTime)??0,...f});b(z),M(null),In("file"),rt(performance.now()-c)}catch(x){g(String(x)),rt(null)}finally{d(!1)}},rf=()=>{ao||(Qs(!0),yo(),Lr.current=window.setInterval(()=>{yo()},1200))},go=()=>{Qs(!1),Lr.current!=null&&(window.clearInterval(Lr.current),Lr.current=null)},lf=c=>{$(x=>{const z=new Set(x);return z.has(c)?z.delete(c):z.add(c),z})},Js=async c=>{if(!window.confirm(e("delete.uploadFirst",{name:c})))return;let x=0;try{x=(await ah(c)).count}catch{x=0}if(x>0){if(!window.confirm(e("delete.uploadCascade",{n:x})))return}else if(!window.confirm(e("delete.uploadSecond")))return;d(!0),g(null);try{await Ym(c,{deleteExperiments:x>0}),await Dt(),i===c&&u(null)}catch(z){g(String(z))}finally{d(!1)}},of=async c=>{if(window.confirm(e("delete.experimentFirst"))&&window.confirm(e("delete.experimentSecond"))){d(!0),g(null);try{await th(c),Mr===c&&tn(null);const x=await Go(U,ne,nl);en(x)}catch(x){g(String(x))}finally{d(!1)}}},Ir=c=>ro(Math.min(4,Math.max(.5,c))),Zs=Math.max(1,Math.ceil(((me==null?void 0:me.total)??0)/nl)),ln=S.useMemo(()=>r==="lab_image"?H.filter(c=>c.type==="image"||Pu(c.name)):r==="lab_video"?H.filter(c=>c.type==="video"||an(c.name)):H,[H,r]),Un=Math.max(1,Math.ceil(ln.length/Jo)),af=S.useMemo(()=>{const c=(tt-1)*Jo;return ln.slice(c,c+Jo)},[ln,tt]);S.useEffect(()=>{_e(1)},[r]),S.useEffect(()=>{_e(c=>Math.min(c,Un))},[Un]),S.useEffect(()=>{je&&!ln.some(c=>c.name===je)&&Ke(null)},[ln,je]);const qs=S.useMemo(()=>mh(so,t),[so,t]),sf=S.useMemo(()=>r==="lab_image"?o.filter(c=>Pu(c.filename)):r==="lab_video"?o.filter(c=>an(c.filename)):o,[o,r]),I=S.useMemo(()=>Qd(vt),[vt]);return S.useEffect(()=>{co(!1)},[k]),S.useEffect(()=>{if(r!=="lab_video")return;let c;const x=()=>{ih().then(Zd).catch(()=>{})};return x(),c=setInterval(x,1500),()=>{c&&clearInterval(c)}},[r]),S.useEffect(()=>{(r!=="lab_video"||re!=="camera")&&go()},[r,re]),S.useEffect(()=>()=>{go()},[]),s.jsxs("div",{className:"flex min-h-full flex-col bg-surface text-on-surface",children:[s.jsxs("header",{className:"flex h-12 shrink-0 items-center justify-between border-b border-outline-variant/20 px-4",children:[s.jsxs("div",{className:"flex items-center gap-6",children:[s.jsx("span",{className:"font-headline text-sm font-bold tracking-wide text-on-surface",children:e("app.title")}),s.jsxs("nav",{className:"flex flex-wrap gap-3 text-xs",children:[s.jsx("button",{type:"button",className:r==="lab_image"?"text-primary":"text-on-surface-variant",onClick:()=>l("lab_image"),children:e("nav.labImage")}),s.jsx("button",{type:"button",className:r==="lab_video"?"text-primary":"text-on-surface-variant",onClick:()=>l("lab_video"),children:e("nav.labVideo")}),s.jsx("button",{type:"button",className:r==="pool"?"text-primary":"text-on-surface-variant",onClick:()=>l("pool"),children:e("nav.pool")}),s.jsx("button",{type:"button",className:r==="history"?"text-primary":"text-on-surface-variant",onClick:()=>l("history"),children:e("nav.history")})]})]}),s.jsxs("div",{className:"flex items-center gap-2",children:[s.jsxs("div",{className:"mr-2 flex gap-1 text-[10px]",children:[s.jsx("button",{type:"button",className:`rounded px-2 py-0.5 ${t==="zh"?"bg-primary-container 
text-on-primary-container":"text-on-surface-variant"}`,onClick:()=>n("zh"),children:e("lang.zh")}),s.jsx("button",{type:"button",className:`rounded px-2 py-0.5 ${t==="en"?"bg-primary-container text-on-primary-container":"text-on-surface-variant"}`,onClick:()=>n("en"),children:e("lang.en")})]}),s.jsxs("a",{className:"flex items-center gap-1 rounded border border-outline-variant/30 px-2 py-1 text-xs text-on-surface-variant hover:bg-surface-container",href:"/debug",children:[s.jsx(Bm,{className:"h-3.5 w-3.5"})," ",e("nav.cameraDebug")]}),s.jsxs("a",{className:"flex items-center gap-1 rounded border border-outline-variant/30 px-2 py-1 text-xs text-on-surface-variant hover:bg-surface-container",href:"/",children:[s.jsx(Wm,{className:"h-3.5 w-3.5"})," ",e("nav.home")]})]})]}),s.jsxs("div",{className:"flex min-h-0 flex-1",children:[s.jsxs("aside",{className:"w-64 shrink-0 border-r border-outline-variant/15 bg-surface-container-lowest p-3 text-xs",children:[s.jsx("div",{className:"mb-3 font-semibold text-on-surface-variant",children:e("sidebar.assets")}),s.jsxs("label",{className:"mb-3 flex cursor-pointer items-center gap-2 rounded bg-primary-container/30 px-2 py-2 text-on-primary-container",children:[s.jsx(Km,{className:"h-4 w-4"}),s.jsx("span",{children:e("sidebar.upload")}),s.jsx("input",{type:"file",accept:"image/*,video/*",className:"hidden",onChange:async c=>{var z;const x=(z=c.target.files)==null?void 0:z[0];if(x){d(!0),g(null);try{const le=await Xm(x);await Dt(),u(le.filename)}catch(le){g(String(le))}finally{d(!1),c.target.value=""}}}})]}),s.jsxs("button",{type:"button",className:"mb-2 flex w-full items-center gap-1 text-left text-on-surface-variant hover:text-on-surface",onClick:()=>{Dt().catch(c=>g(String(c))),_e(1),ho()},children:[s.jsx(Qm,{className:"h-3 w-3"})," ",e("sidebar.refresh")]}),s.jsx("div",{className:"max-h-36 overflow-y-auto border-t border-outline-variant/10 pt-2",children:sf.map(c=>s.jsxs("div",{className:`mb-1 flex items-center gap-0.5 rounded px-1 ${i===c.filename?"bg-surface-container":""}`,children:[s.jsxs("button",{type:"button",className:`min-w-0 flex-1 truncate rounded px-1 py-1 text-left ${i===c.filename?"text-primary":""}`,title:c.filename,onClick:()=>{u(c.filename),l(an(c.filename)?"lab_video":"lab_image")},children:[s.jsx("span",{className:"block truncate",children:c.filename}),s.jsx("span",{className:"block text-[9px] text-on-surface-variant/90",children:an(c.filename)?e("sidebar.assetTypeVideo"):e("sidebar.assetTypeImage")})]}),s.jsx("button",{type:"button",className:"shrink-0 rounded p-1 text-on-surface-variant hover:bg-surface-container-high hover:text-error",title:e("pool.delete"),"aria-label":e("pool.delete"),onClick:x=>{x.stopPropagation(),Js(c.filename)},children:s.jsx(wu,{className:"h-3.5 w-3.5"})})]},c.filename))}),s.jsxs("div",{className:"mt-3 border-t border-outline-variant/10 pt-3",children:[s.jsxs("div",{className:"mb-2 flex items-start justify-between gap-2",children:[s.jsx("div",{className:"min-w-0 font-semibold leading-tight text-on-surface-variant",children:e("sidebar.debugCaptures")}),s.jsx("button",{type:"button",className:"shrink-0 rounded bg-primary px-2 py-1 text-[10px] font-medium text-on-primary disabled:opacity-40",disabled:!je,title:je??void 0,onClick:async()=>{if(je){d(!0);try{const c=await Jm(je);await Dt(),u(c.filename),l(an(c.filename)?"lab_video":"lab_image")}catch(c){g(String(c))}finally{d(!1)}}},children:e("sidebar.importToPool")})]}),ln.length===0?s.jsx("p",{className:"text-[10px] 
text-on-surface-variant",children:e("sidebar.debugEmpty")}):s.jsxs(s.Fragment,{children:[s.jsx("div",{className:"max-h-[min(22rem,55vh)] space-y-1.5 overflow-y-auto pr-0.5",children:af.map(c=>s.jsxs("button",{type:"button",className:`flex w-full items-center gap-2 rounded-md border px-2 py-1.5 text-left transition-colors ${je===c.name?"border-primary bg-primary-container/20":"border-outline-variant/25 hover:bg-surface-container"}`,onClick:()=>Ke(c.name),children:[c.type==="image"?s.jsx("img",{src:rh(c.name),alt:"",className:"h-11 w-11 shrink-0 rounded object-cover"}):s.jsx("div",{className:"flex h-11 w-11 shrink-0 items-center justify-center rounded bg-surface-container-high text-[9px] text-on-surface-variant",children:"video"}),s.jsxs("div",{className:"min-w-0 flex-1",children:[s.jsx("div",{className:"truncate font-mono text-[10px] text-on-surface",title:c.name,children:c.name}),s.jsxs("div",{className:"text-[9px] text-on-surface-variant",children:[c.type==="video"||an(c.name)?e("sidebar.assetTypeVideo"):e("sidebar.assetTypeImage")," ","· ",hl(c.modified,t)," · ",Wa(c.size)]})]})]},c.name))}),s.jsxs("div",{className:"mt-2 flex items-center justify-between gap-1 text-[10px] text-on-surface-variant",children:[s.jsx("button",{type:"button",className:"rounded border border-outline-variant/30 px-2 py-0.5 disabled:opacity-40",disabled:tt<=1,onClick:()=>_e(c=>Math.max(1,c-1)),children:e("history.prev")}),s.jsx("span",{className:"tabular-nums",children:e("sidebar.debugPage",{cur:tt,total:Un})}),s.jsx("button",{type:"button",className:"rounded border border-outline-variant/30 px-2 py-0.5 disabled:opacity-40",disabled:tt>=Un,onClick:()=>_e(c=>Math.min(Un,c+1)),children:e("history.next")})]})]})]})]}),(r==="lab_image"||r==="lab_video")&&s.jsxs("div",{className:"flex min-h-0 min-w-0 flex-1",children:[s.jsxs("main",{className:"min-h-0 min-w-0 flex-1 overflow-y-auto p-4",children:[m&&s.jsx("div",{className:"mb-2 rounded border border-error/40 bg-error-container/20 px-3 py-2 text-xs text-error",children:m}),r==="lab_video"&&s.jsxs("div",{className:"mb-3 max-w-5xl rounded-lg border border-outline-variant/25 bg-surface-container-low/80 p-3 text-[11px] leading-relaxed text-on-surface",children:[s.jsx("p",{className:"text-[10px] text-on-surface-variant",children:e("lab.videoLiveIntro")}),s.jsxs("div",{className:"mt-2 flex flex-wrap gap-2",children:[s.jsx("button",{type:"button",className:`rounded px-3 py-1.5 text-[11px] font-medium ${re==="file"?"bg-primary text-on-primary-container":"border border-outline-variant/40 bg-surface-container text-on-surface"}`,onClick:()=>zr("file"),children:e("lab.previewModeFile")}),s.jsx("button",{type:"button",className:`rounded px-3 py-1.5 text-[11px] font-medium ${re==="camera"?"bg-primary text-on-primary-container":"border border-outline-variant/40 bg-surface-container text-on-surface"}`,onClick:()=>zr("camera"),children:e("lab.previewModeCamera")})]})]}),s.jsxs("div",{className:"relative aspect-video w-full max-w-5xl overflow-hidden rounded-lg border border-outline-variant/20 bg-surface-container-lowest",children:[r==="lab_video"&&re==="camera"?s.jsx("div",{className:"relative flex h-full min-h-[220px] flex-col items-center justify-center gap-3 bg-black p-2",children:s.jsxs("div",{className:"relative inline-block max-h-[70vh] max-w-full",children:[lo?s.jsx("img",{ref:Ys,src:lo,alt:"",className:"max-h-[70vh] w-full min-h-[120px] object-contain",onLoad:c=>Yd({w:c.currentTarget.naturalWidth,h:c.currentTarget.naturalHeight})}):s.jsxs("div",{className:"flex min-h-[200px] 
w-full min-w-[280px] flex-col items-center justify-center gap-2 text-[11px] text-on-surface-variant",children:[s.jsx(Wo,{className:"h-8 w-8 animate-spin text-primary"}),s.jsx("span",{children:e("lab.cameraPreviewLoading")})]}),s.jsx("canvas",{ref:rn,className:"pointer-events-none absolute left-0 top-0"})]})}):i?s.jsx("div",{className:"relative h-full min-h-[200px] overflow-auto",children:r==="lab_video"?s.jsxs(s.Fragment,{children:[s.jsxs("div",{className:"absolute left-2 top-2 z-20 flex flex-wrap gap-1",children:[s.jsxs("button",{type:"button",title:e("lab.grid"),className:`rounded border px-2 py-1 text-[10px] ${nt?"border-primary bg-primary/20":"border-white/30 bg-black/40"} text-white`,onClick:()=>$s(c=>!c),children:[s.jsx(gu,{className:"mr-1 inline h-3 w-3"}),e("lab.grid")]}),s.jsx("button",{type:"button",title:e("lab.zoomOut"),className:"rounded border border-white/30 bg-black/40 px-2 py-1 text-[10px] text-white",onClick:()=>Ir(nn-.25),children:s.jsx(ku,{className:"inline h-3 w-3"})}),s.jsx("button",{type:"button",title:e("lab.zoomIn"),className:"rounded border border-white/30 bg-black/40 px-2 py-1 text-[10px] text-white",onClick:()=>Ir(nn+.25),children:s.jsx(Su,{className:"inline h-3 w-3"})}),s.jsx("button",{type:"button",title:e("lab.zoomReset"),className:"rounded border border-white/30 bg-black/40 px-2 py-1 text-[10px] text-white",onClick:()=>ro(1),children:s.jsx(xu,{className:"inline h-3 w-3"})})]}),s.jsxs("div",{className:"flex min-h-[200px] flex-col items-center justify-center gap-2 bg-black py-2",children:[s.jsx("div",{className:"inline-block origin-top-left transition-transform",style:{transform:`scale(${nn})`},children:s.jsxs("div",{className:"relative inline-block",children:[nt&&s.jsx("div",{className:"pointer-events-none absolute inset-0 z-[1]",style:{backgroundImage:["linear-gradient(to right, rgba(255,255,255,0.12) 1px, transparent 1px)","linear-gradient(to bottom, rgba(255,255,255,0.12) 1px, transparent 1px)"].join(","),backgroundSize:"48px 48px"}}),s.jsx("video",{ref:Dn,src:Fr,loop:!0,playsInline:!0,autoPlay:!0,muted:!0,preload:"metadata",controls:!0,className:"max-h-[70vh] w-full max-w-full object-contain",onError:()=>oo(e("lab.videoPreviewFailed")),onLoadedData:()=>{oo(null);const c=Dn.current;c&&c.play().catch(()=>{})}}),s.jsx("canvas",{ref:rn,className:"pointer-events-none absolute left-0 top-0"})]})}),Ws&&s.jsx("div",{className:"rounded border border-error/40 bg-error-container/20 px-3 py-1.5 text-[11px] text-error",children:Ws})]})]}):s.jsxs(s.Fragment,{children:[s.jsxs("div",{className:"absolute left-2 top-2 z-20 flex flex-wrap gap-1",children:[s.jsxs("button",{type:"button",title:e("lab.grid"),className:`rounded border px-2 py-1 text-[10px] ${nt?"border-primary bg-primary/20":"border-white/30 bg-black/40"} text-white`,onClick:()=>$s(c=>!c),children:[s.jsx(gu,{className:"mr-1 inline h-3 w-3"}),e("lab.grid")]}),s.jsx("button",{type:"button",title:e("lab.zoomOut"),className:"rounded border border-white/30 bg-black/40 px-2 py-1 text-[10px] text-white",onClick:()=>Ir(nn-.25),children:s.jsx(ku,{className:"inline h-3 w-3"})}),s.jsx("button",{type:"button",title:e("lab.zoomIn"),className:"rounded border border-white/30 bg-black/40 px-2 py-1 text-[10px] text-white",onClick:()=>Ir(nn+.25),children:s.jsx(Su,{className:"inline h-3 w-3"})}),s.jsx("button",{type:"button",title:e("lab.zoomReset"),className:"rounded border border-white/30 bg-black/40 px-2 py-1 text-[10px] text-white",onClick:()=>ro(1),children:s.jsx(xu,{className:"inline h-3 
w-3"})})]}),s.jsx("div",{className:"inline-block origin-top-left transition-transform",style:{transform:`scale(${nn})`},children:s.jsxs("div",{className:"relative inline-block",children:[nt&&s.jsx("div",{className:"pointer-events-none absolute inset-0 z-[1]",style:{backgroundImage:["linear-gradient(to right, rgba(255,255,255,0.12) 1px, transparent 1px)","linear-gradient(to bottom, rgba(255,255,255,0.12) 1px, transparent 1px)"].join(","),backgroundSize:"48px 48px"}}),s.jsx("img",{ref:po,src:Fr,alt:"preview",className:"max-h-[70vh] w-full object-contain"}),s.jsx("canvas",{ref:rn,className:"pointer-events-none absolute left-0 top-0"})]})})]})}):s.jsx("div",{className:"flex h-full items-center justify-center px-4 text-center text-on-surface-variant",children:e(r==="lab_video"?"lab.selectOrUploadVideo":"lab.selectOrUpload")}),p&&s.jsx("div",{className:"absolute inset-0 flex items-center justify-center bg-black/40",children:s.jsx(Wo,{className:"h-8 w-8 animate-spin text-primary"})})]}),(i&&r==="lab_image"||r==="lab_video"&&(re==="file"&&i||re==="camera"))&&s.jsxs("div",{className:"mt-2 flex flex-wrap items-center gap-3 rounded-lg border border-outline-variant/25 bg-surface-container-lowest/90 px-3 py-2 text-xs text-on-surface",children:[s.jsx("span",{className:"shrink-0 font-medium text-on-surface-variant",children:e("lab.layers")}),s.jsx("div",{className:"flex flex-wrap gap-x-4 gap-y-1",children:["matched","pattern","all"].map(c=>s.jsxs("label",{className:"flex cursor-pointer items-center gap-1",children:[s.jsx("input",{type:"checkbox",className:"accent-primary",checked:R[c],onChange:x=>T(z=>({...z,[c]:x.target.checked}))}),s.jsx("span",{children:e(c==="matched"?"lab.layer.matched":c==="pattern"?"lab.layer.pattern":"lab.layer.all")})]},c))})]}),(i||r==="lab_video"&&re==="camera")&&s.jsxs(s.Fragment,{children:[s.jsxs("div",{className:"mt-2 grid gap-2 sm:grid-cols-2",children:[s.jsxs("details",{open:!0,className:"rounded-lg border border-outline-variant/25 bg-surface-container-lowest/90",children:[s.jsxs("summary",{className:"flex cursor-pointer list-none items-center justify-between gap-2 px-3 py-2 text-xs font-semibold text-on-surface [&::-webkit-details-marker]:hidden",children:[e("lab.solveSection"),s.jsx(Gn,{className:"h-4 w-4 shrink-0 text-on-surface-variant"})]}),s.jsx("div",{className:"border-t border-outline-variant/15 px-3 py-2 text-[10px]",children:vt?s.jsxs("div",{className:"space-y-1 text-on-surface",children:[I.tSolveMs!=null&&s.jsxs("div",{className:"space-y-0.5",children:[s.jsxs("div",{className:"flex justify-between gap-2",children:[s.jsx("span",{className:"text-on-surface-variant",children:e("lab.metric.solveComputeMs")}),s.jsxs("span",{className:"font-mono tabular-nums",children:[I.tSolveMs.toFixed(0)," ms"]})]}),s.jsx("p",{className:"text-[8px] leading-snug text-on-surface-variant/90",children:e("lab.metric.solveComputeHelp")})]}),fo!=null&&s.jsxs("div",{className:"space-y-0.5 border-t border-outline-variant/10 pt-1",children:[s.jsxs("div",{className:"flex justify-between gap-2",children:[s.jsx("span",{className:"text-on-surface-variant",children:e("lab.metric.solveRoundTripMs")}),s.jsxs("span",{className:"font-mono tabular-nums",children:[fo.toFixed(0)," ms"]})]}),s.jsx("p",{className:"text-[8px] leading-snug text-on-surface-variant/90",children:e("lab.metric.solveRoundTripHelp")})]}),(I.tBackendTotalMs!=null||I.tOpenDecodeMs!=null||I.tPreprocessMs!=null||I.tExtractMs!=null||I.tSolveMs!=null)&&s.jsxs("div",{className:"space-y-0.5 border-t border-outline-variant/10 
pt-1",children:[s.jsxs("div",{className:"flex justify-between gap-2",children:[s.jsx("span",{className:"text-on-surface-variant",children:e("lab.metric.backendTotalMs")}),s.jsx("span",{className:"font-mono tabular-nums",children:I.tBackendTotalMs!=null?`${I.tBackendTotalMs.toFixed(0)} ms`:e("common.placeholder")})]}),s.jsxs("div",{className:"flex flex-wrap gap-x-2 gap-y-0.5 text-[8px] text-on-surface-variant/90",children:[s.jsxs("span",{children:[e("lab.metric.openDecodeMs"),":"," ",I.tOpenDecodeMs!=null?`${I.tOpenDecodeMs.toFixed(0)} ms`:e("common.placeholder")]}),s.jsxs("span",{children:[e("lab.metric.preprocessMs"),":"," ",I.tPreprocessMs!=null?`${I.tPreprocessMs.toFixed(0)} ms`:e("common.placeholder")]}),s.jsxs("span",{children:[e("lab.metric.extractMs"),":"," ",I.tExtractMs!=null?`${I.tExtractMs.toFixed(0)} ms`:e("common.placeholder")]}),s.jsxs("span",{children:[e("lab.metric.solveOnlyMs"),":"," ",I.tSolveMs!=null?`${I.tSolveMs.toFixed(0)} ms`:e("common.placeholder")]})]})]}),(I.raDeg!=null||I.decDeg!=null)&&s.jsxs("div",{className:"leading-tight",children:[s.jsx("span",{className:"text-on-surface-variant",children:e("lab.metric.radec")}),s.jsxs("div",{className:"mt-0.5 font-mono text-[9px]",children:["α ",$l(I.raDeg)," · δ"," ",$l(I.decDeg)]})]}),s.jsxs("div",{className:"flex flex-wrap gap-x-2 gap-y-0.5 text-[9px]",children:[I.matches!=null&&s.jsxs("span",{children:[s.jsx("span",{className:"text-on-surface-variant",children:e("lab.metric.matches")})," ",s.jsx("span",{className:"font-mono",children:I.matches})]}),I.rmseArcsec!=null&&s.jsxs("span",{children:[s.jsx("span",{className:"text-on-surface-variant",children:e("lab.metric.rmse")})," ",s.jsxs("span",{className:"font-mono",children:[I.rmseArcsec.toFixed(2),"″"]})]}),I.prob!=null&&s.jsxs("span",{className:"inline-flex max-w-full flex-col gap-0.5",children:[s.jsxs("span",{children:[s.jsx("span",{className:"text-on-surface-variant",children:e("lab.metric.prob")})," ",s.jsx("span",{className:"font-mono",children:Cn(I.prob,vt??void 0).line})]}),s.jsx("span",{className:"text-[8px] leading-snug text-on-surface-variant/90",children:e("lab.metric.probHelp")}),Cn(I.prob,vt??void 0).rawLine&&s.jsxs(s.Fragment,{children:[s.jsxs("span",{className:"text-[8px] text-on-surface-variant",children:[e("lab.metric.probRaw"),":"," ",Cn(I.prob,vt??void 0).rawLine]}),s.jsx("span",{className:"text-[8px] leading-snug text-on-surface-variant/90",children:e("lab.metric.probRawHelp")})]})]})]}),I.status&&s.jsxs("div",{className:"border-t border-outline-variant/15 pt-1 text-[9px]",children:[s.jsxs("span",{className:"text-on-surface-variant",children:[e("lab.metric.status")," "]}),s.jsx("span",{className:"font-mono text-secondary",children:I.status})]})]}):s.jsx("p",{className:"text-on-surface-variant",children:e("common.placeholder")})})]}),s.jsxs("details",{open:!0,className:"rounded-lg border border-outline-variant/25 bg-surface-container-lowest/90",children:[s.jsxs("summary",{className:"flex cursor-pointer list-none items-center justify-between gap-2 px-3 py-2 text-xs font-semibold text-on-surface [&::-webkit-details-marker]:hidden",children:[e("lab.imageSection"),s.jsx(Gn,{className:"h-4 w-4 shrink-0 text-on-surface-variant"})]}),s.jsxs("div",{className:"space-y-0.5 border-t border-outline-variant/15 px-3 py-2 text-[10px]",children:[s.jsxs("div",{className:"flex justify-between gap-2",children:[s.jsx("span",{className:"text-on-surface-variant",children:e("lab.resolution")}),s.jsx("span",{className:"font-mono 
tabular-nums",children:vo.w>0?`${vo.w}×${vo.h}`:e("common.placeholder")})]}),s.jsxs("div",{className:"flex justify-between gap-2",children:[s.jsx("span",{className:"text-on-surface-variant",children:e("lab.starsDetected")}),s.jsx("span",{className:"font-mono tabular-nums",children:Xs??e("common.placeholder")})]}),s.jsxs("div",{className:"flex justify-between gap-2",children:[s.jsx("span",{className:"text-on-surface-variant",children:e("lab.fwhm")}),s.jsx("span",{children:e("common.placeholder")})]})]})]})]}),i&&s.jsxs(s.Fragment,{children:[s.jsxs("div",{className:"mt-2 flex flex-wrap gap-4 text-xs text-on-surface-variant",children:[s.jsxs("span",{children:[e("lab.file"),":"," ",s.jsx("span",{className:"font-mono text-on-surface",children:i})]}),((ei=o.find(c=>c.filename===i))==null?void 0:ei.source)&&s.jsxs("span",{className:"rounded bg-surface-container-high px-2 py-0.5",children:[e("lab.source"),":"," ",(ti=o.find(c=>c.filename===i))==null?void 0:ti.source]})]}),s.jsxs("details",{open:!0,className:"mt-2 rounded-lg border border-outline-variant/25 bg-surface-container-lowest/90 text-xs shadow-sm",children:[s.jsxs("summary",{className:"flex cursor-pointer list-none items-center gap-2 px-3 py-2 font-semibold text-on-surface [&::-webkit-details-marker]:hidden",children:[e("lab.meta.title"),io&&s.jsx(Wo,{className:"h-3.5 w-3.5 animate-spin"}),s.jsx(Gn,{className:"ml-auto h-4 w-4 shrink-0 text-on-surface-variant"})]}),s.jsx("div",{className:"border-t border-outline-variant/15 p-3 pt-2",children:qs.length>0?s.jsx("dl",{className:"grid grid-cols-2 gap-x-4 gap-y-2 sm:grid-cols-3",children:qs.map(c=>s.jsxs("div",{children:[s.jsx("dt",{className:"text-[10px] text-on-surface-variant",children:e(c.key)}),s.jsx("dd",{className:"text-[11px] font-medium text-on-surface",children:c.value})]},`${c.key}-${c.value}`))}):so&&!io?s.jsx("p",{className:"text-[10px] leading-relaxed text-on-surface-variant",children:e("lab.meta.partial")}):io?null:s.jsx("p",{className:"text-[10px] text-on-surface-variant",children:e("lab.meta.noSidecar")})})]})]})]}),(i||r==="lab_video"&&re==="camera")&&(k||C)&&s.jsxs("section",{className:"mt-3 max-h-[min(50vh,28rem)] rounded-lg border border-outline-variant/25 bg-surface-container-lowest/95",children:[s.jsxs("div",{className:"flex h-9 shrink-0 items-center justify-between border-b border-outline-variant/15 px-3 text-[10px] uppercase text-on-surface-variant",children:[s.jsx("span",{children:e("results.title")}),s.jsxs("div",{className:"flex gap-3",children:[(ni=C==null?void 0:C.results)!=null&&ni.length?s.jsx("button",{type:"button",className:"rounded bg-surface-container px-2 py-0.5 normal-case",onClick:async()=>{if(!(!i||!C)){for(const c of C.results){if(!c.success)continue;const x=c.result;await Ko({input_name:i,preset_label:String(c.label),result_json:c,metrics:Xo(x??null),replay:{layers:R,params:f}})}g(null)}},children:e("results.saveBatchAll")}):null,k&&s.jsx("button",{type:"button",className:"rounded bg-surface-container px-2 py-0.5 normal-case",onClick:async()=>{if(!i&&Jd!=="camera")return;const c=k.result;await Ko({input_name:i??e("lab.cameraSnapshotName"),preset_label:"manual",result_json:k,metrics:Xo(c??null),replay:{layers:R,params:f}}),g(null)},children:e("results.saveCurrent")})]})]}),s.jsx("div",{className:"min-h-0 overflow-y-auto p-3",children:s.jsxs("div",{className:"flex gap-3 overflow-x-auto text-xs",children:[C==null?void 0:C.results.map((c,x)=>{const z=c.result,le=Xd[x]??!1;return s.jsxs("div",{className:"min-w-[15rem] max-w-sm shrink-0 rounded-lg border 
border-outline-variant/25 bg-surface-container p-3 shadow-sm",children:[s.jsx("div",{className:"border-b border-outline-variant/15 pb-2 font-semibold text-secondary",children:String(c.label)}),c.success?s.jsxs(s.Fragment,{children:[s.jsx(bu,{result:z,t:e,roundTripMs:null}),s.jsx("button",{type:"button",className:"mt-2 text-[10px] text-primary hover:underline",onClick:()=>uo(Te=>({...Te,[x]:!le})),children:e(le?"results.hideRaw":"results.viewRaw")}),le&&s.jsx("pre",{className:"mt-1 max-h-40 overflow-auto rounded bg-surface-container-highest p-2 text-[9px] text-on-surface-variant",children:JSON.stringify(c,null,2)})]}):s.jsx("div",{className:"mt-2 text-[10px] text-error",children:String(c.error)}),c.success&&i&&s.jsx("button",{type:"button",className:"mt-3 w-full rounded bg-surface-container-high py-1.5 text-[10px] font-medium",onClick:()=>Ko({input_name:i,preset_label:String(c.label),result_json:c,metrics:Xo(z??null),replay:{layers:R,params:f}}).catch(Te=>g(String(Te))),children:e("results.saveRow")})]},x)}),k&&!C&&s.jsxs("div",{className:"min-w-[16rem] max-w-md shrink-0 rounded-lg border border-outline-variant/25 bg-surface-container p-3 shadow-sm",children:[s.jsx("div",{className:"border-b border-outline-variant/15 pb-2 text-[10px] font-semibold uppercase text-on-surface-variant",children:e("results.title")}),s.jsx(bu,{result:k.result,t:e,roundTripMs:fo}),s.jsx("button",{type:"button",className:"mt-2 text-[10px] text-primary hover:underline",onClick:()=>co(c=>!c),children:e(Gs?"results.hideRaw":"results.viewRaw")}),Gs&&s.jsx("pre",{className:"mt-1 max-h-48 overflow-auto rounded bg-surface-container-highest p-2 text-[9px] text-on-surface-variant",children:JSON.stringify(k,null,2)})]})]})})]})]}),s.jsxs("aside",{className:"flex w-80 shrink-0 flex-col border-l border-outline-variant/15 bg-surface-container-low text-xs min-h-0",children:[r!=="lab_video"&&s.jsxs("div",{className:"shrink-0 space-y-2 border-b border-outline-variant/20 p-4 pb-3",children:[s.jsx("button",{type:"button",className:"w-full rounded bg-primary py-2.5 font-semibold text-on-primary-container",onClick:ef,disabled:p,children:e("btn.solveOne")}),s.jsx("button",{type:"button",className:"w-full rounded bg-secondary/80 py-2.5 font-semibold text-on-secondary",onClick:tf,disabled:p,children:e("btn.solveBatch")})]}),r==="lab_video"&&s.jsx("div",{className:"shrink-0 space-y-2 border-b border-outline-variant/20 p-4 pb-3",children:re==="file"?s.jsx("button",{type:"button",className:"w-full rounded bg-secondary/80 py-2.5 font-semibold text-on-secondary disabled:opacity-40",onClick:()=>void nf(),disabled:p||!i,children:e("lab.solveCurrentFrame")}):s.jsxs(s.Fragment,{children:[s.jsx("button",{type:"button",className:"w-full rounded bg-primary py-2.5 font-semibold text-on-primary-container disabled:opacity-40",onClick:()=>rf(),disabled:p||ao,children:e("lab.solveCameraStart")}),s.jsx("button",{type:"button",className:"w-full rounded bg-error/80 py-2.5 font-semibold text-on-error disabled:opacity-40",onClick:()=>go(),disabled:!ao,children:e("lab.solveCameraStop")}),s.jsx("button",{type:"button",className:"w-full rounded bg-secondary/80 py-2.5 font-semibold text-on-secondary disabled:opacity-40",onClick:()=>void yo(),disabled:p,children:e("lab.solveCameraFrame")})]})}),r==="lab_video"&&Or&&s.jsxs("div",{className:"shrink-0 border-b border-outline-variant/20 px-4 py-2 text-[10px] text-on-surface-variant",children:[s.jsx("div",{className:"font-medium text-on-surface",children:e("lab.systemLoad")}),s.jsxs("div",{children:["CPU 
",Or.cpu_usage,"% · RAM ",Or.memory_usage,"% ·"," ",Or.temperature,"°C"]})]}),s.jsxs("div",{className:"min-h-0 flex-1 overflow-y-auto p-4",children:[s.jsxs("details",{open:!0,className:"rounded-lg border border-outline-variant/20 bg-surface-container-highest/30",children:[s.jsxs("summary",{className:"flex cursor-pointer list-none items-center justify-between gap-2 px-3 py-2 font-semibold text-on-surface-variant [&::-webkit-details-marker]:hidden",children:[e("params.title"),s.jsx(Gn,{className:"h-4 w-4 shrink-0"})]}),s.jsxs("div",{className:"border-t border-outline-variant/15 p-3 pt-2",children:[s.jsx("p",{className:"mb-3 text-[10px] leading-relaxed text-on-surface-variant/90",children:e("params.blockSolveIntro")}),s.jsxs("div",{className:"space-y-2",children:[s.jsxs("label",{className:"block",children:[s.jsx("span",{className:"text-[10px] font-medium text-on-surface-variant",children:e("params.solveProfile")}),s.jsx("p",{className:"mb-1 mt-0.5 text-[9px] leading-snug text-on-surface-variant/85",children:e("params.solveProfileHelp")}),s.jsxs("select",{className:"w-full rounded bg-surface-container-highest px-2 py-1",value:f.solve_profile??"balanced",onChange:c=>h(x=>({...x,solve_profile:c.target.value})),children:[s.jsx("option",{value:"speed",children:e("params.solveProfileSpeed")}),s.jsx("option",{value:"balanced",children:e("params.solveProfileBalanced")}),s.jsx("option",{value:"robust",children:e("params.solveProfileRobust")})]})]}),s.jsx(Ye,{label:e("params.fov"),helpKey:"params.fovHelp",value:f.fov_estimate??"",onChange:c=>h(x=>({...x,fov_estimate:c}))}),s.jsx(Ye,{label:e("params.fovErr"),helpKey:"params.fovErrHelp",value:f.fov_max_error??"",onChange:c=>h(x=>({...x,fov_max_error:c}))}),s.jsx(Ye,{label:e("params.timeout"),helpKey:"params.timeoutHelp",value:f.solve_timeout_ms??"",onChange:c=>h(x=>({...x,solve_timeout_ms:c}))}),s.jsx(Ye,{label:e("params.ra"),helpKey:"params.raHelp",value:f.hint_ra_deg??"",onChange:c=>h(x=>({...x,hint_ra_deg:c}))}),s.jsx(Ye,{label:e("params.dec"),helpKey:"params.decHelp",value:f.hint_dec_deg??"",onChange:c=>h(x=>({...x,hint_dec_deg:c}))}),s.jsx(Ye,{label:e("params.maxSide"),helpKey:"params.maxSideHelp",value:f.max_image_side??"",onChange:c=>h(x=>({...x,max_image_side:c}))}),s.jsxs("label",{className:"mt-1.5 flex cursor-pointer items-center gap-2",children:[s.jsx("input",{type:"checkbox",className:"accent-primary",checked:f.detail_level==="full",onChange:c=>h(x=>({...x,detail_level:c.target.checked?"full":"summary"}))}),s.jsx("span",{className:"text-[10px] text-on-surface-variant",children:e("params.detailLevelFull")})]}),s.jsxs("label",{className:"mt-1.5 flex cursor-pointer items-start gap-2",children:[s.jsx("input",{type:"checkbox",className:"accent-primary mt-0.5",checked:!!f.large_scale_bg_subtract,onChange:c=>h(x=>({...x,large_scale_bg_subtract:c.target.checked}))}),s.jsxs("span",{children:[s.jsx("span",{className:"text-[10px] text-on-surface-variant",children:e("params.largeScaleBg")}),s.jsx("p",{className:"mt-0.5 text-[9px] leading-snug text-on-surface-variant/85",children:e("params.largeScaleBgHelp")})]})]})]})]})]}),s.jsxs("details",{open:!0,className:"mt-3 rounded-lg border border-outline-variant/20 bg-surface-container-highest/30",children:[s.jsxs("summary",{className:"flex cursor-pointer list-none items-center justify-between gap-2 px-3 py-2 font-semibold text-on-surface-variant [&::-webkit-details-marker]:hidden",children:[e("params.centroid"),s.jsx(Gn,{className:"h-4 w-4 shrink-0"})]}),s.jsxs("div",{className:"border-t 
border-outline-variant/15 p-3 pt-2",children:[s.jsx("p",{className:"mb-3 text-[10px] leading-relaxed text-on-surface-variant/90",children:e("params.blockCentroidIntro")}),s.jsxs("div",{className:"space-y-2",children:[s.jsx(Ye,{label:e("params.sigma"),helpKey:"params.sigmaHelp",step:.1,value:((ri=f.centroid)==null?void 0:ri.sigma)??"",onChange:c=>h(x=>({...x,centroid:{...x.centroid,sigma:c}}))}),s.jsx(Ye,{label:e("params.maxArea"),helpKey:"params.maxAreaHelp",value:((li=f.centroid)==null?void 0:li.max_area)??"",onChange:c=>h(x=>({...x,centroid:{...x.centroid,max_area:c}}))}),s.jsx(Ye,{label:e("params.minArea"),helpKey:"params.minAreaHelp",value:((oi=f.centroid)==null?void 0:oi.min_area)??"",onChange:c=>h(x=>({...x,centroid:{...x.centroid,min_area:c}}))}),s.jsx(Ye,{label:e("params.filtsize"),helpKey:"params.filtsizeHelp",step:2,value:((ai=f.centroid)==null?void 0:ai.filtsize)??"",onChange:c=>h(x=>({...x,centroid:{...x.centroid,filtsize:c}}))})]})]})]}),s.jsxs("div",{className:"mt-4 rounded-md border border-outline-variant/20 bg-surface-container-highest/40 p-3",children:[s.jsx("div",{className:"mb-2 font-semibold text-on-surface-variant",children:e("sidebar.batchPresets")}),s.jsx("p",{className:"mb-2 text-[10px] leading-snug text-on-surface-variant",children:e("sidebar.batchHint")}),s.jsx("div",{className:"max-h-36 space-y-1.5 overflow-y-auto",children:[...y,...N].map(c=>s.jsxs("label",{className:"flex cursor-pointer items-center gap-2",children:[s.jsx("input",{type:"checkbox",checked:_.has(c.id),onChange:()=>lf(c.id)}),s.jsx("span",{className:"truncate",children:c.name})]},c.id))})]}),s.jsxs("div",{className:"mt-6 border-t border-outline-variant/20 pt-4",children:[s.jsx("div",{className:"mb-2 font-semibold",children:e("btn.applyPresets")}),s.jsx("div",{className:"max-h-24 space-y-1 overflow-y-auto",children:[...y,...N].map(c=>s.jsx("button",{type:"button",className:"block w-full truncate text-left text-primary hover:underline",onClick:()=>qd(c.params),children:c.name},c.id))}),s.jsxs("div",{className:"mt-3 flex gap-1",children:[s.jsx("input",{className:"flex-1 rounded bg-surface-container-highest px-2 py-1",placeholder:e("placeholder.newPreset"),value:It,onChange:c=>E(c.target.value)}),s.jsx("button",{type:"button",className:"rounded bg-surface-container-high px-2",onClick:async()=>{if(It.trim()){d(!0);try{await eh(It.trim(),f),E(""),await Dt();const c=await Qo("user");j(c.presets)}catch(c){g(String(c))}finally{d(!1)}}},children:e("btn.savePreset")})]})]})]})]})]}),r==="pool"&&s.jsxs("main",{className:"flex-1 overflow-auto p-4",children:[s.jsxs("h2",{className:"mb-4 flex items-center gap-2 text-lg font-semibold",children:[s.jsx(Am,{className:"h-5 w-5"})," ",e("pool.title")]}),s.jsxs("table",{className:"w-full text-left text-xs",children:[s.jsx("thead",{children:s.jsxs("tr",{className:"border-b border-outline-variant/30 text-on-surface-variant",children:[s.jsx("th",{className:"py-2",children:e("pool.col.name")}),s.jsx("th",{className:"py-2",children:e("pool.col.source")}),s.jsx("th",{className:"py-2",children:e("pool.col.size")}),s.jsx("th",{className:"py-2",children:e("pool.col.time")}),s.jsx("th",{className:"w-16 py-2 text-center",children:e("pool.delete")})]})}),s.jsx("tbody",{children:o.map(c=>s.jsxs("tr",{className:"border-b border-outline-variant/10",children:[s.jsx("td",{className:"py-2 
font-mono",children:c.filename}),s.jsx("td",{className:"py-2",children:c.source??e("common.placeholder")}),s.jsx("td",{className:"py-2",children:Wa(c.size)}),s.jsx("td",{className:"py-2",children:hl(c.modified_at,t)}),s.jsx("td",{className:"py-2 text-center",children:s.jsx("button",{type:"button",className:"inline-flex rounded p-1 text-on-surface-variant hover:bg-error-container/30 hover:text-error",title:e("pool.delete"),"aria-label":e("pool.delete"),onClick:()=>void Js(c.filename),children:s.jsx(wu,{className:"h-3.5 w-3.5"})})})]},c.filename))})]})]}),r==="history"&&s.jsxs("main",{className:"flex-1 overflow-auto p-4",children:[s.jsx("p",{className:"mb-4 rounded-lg border border-outline-variant/25 bg-surface-container-lowest/90 p-3 text-xs leading-relaxed text-on-surface-variant",children:e("history.intro")}),s.jsxs("div",{className:"mb-4 flex flex-wrap items-center gap-4",children:[s.jsxs("h2",{className:"flex items-center gap-2 text-lg font-semibold",children:[s.jsx(Vm,{className:"h-5 w-5"})," ",e("history.title")]}),s.jsx("input",{className:"rounded bg-surface-container-highest px-2 py-1 text-xs",placeholder:e("history.search"),value:U,onChange:c=>L(c.target.value)}),s.jsx("button",{type:"button",className:"rounded bg-surface-container px-2 py-1 text-xs",onClick:()=>Go(U,1,nl).then(c=>{et(1),en(c)}),children:e("history.searchBtn")}),s.jsx("button",{type:"button",className:"rounded bg-primary-container/50 px-2 py-1 text-xs",onClick:()=>ju("json").then(c=>{const x=new Blob([c],{type:"application/json"}),z=document.createElement("a");z.href=URL.createObjectURL(x),z.download="experiments.json",z.click()}),children:e("history.exportJson")}),s.jsx("button",{type:"button",className:"rounded bg-primary-container/50 px-2 py-1 text-xs",onClick:()=>ju("csv").then(c=>{const x=new Blob([c],{type:"text/csv"}),z=document.createElement("a");z.href=URL.createObjectURL(x),z.download="experiments.csv",z.click()}),children:e("history.exportCsv")})]}),s.jsxs("div",{className:"flex flex-wrap items-center gap-3 text-xs text-on-surface-variant",children:[s.jsx("span",{children:e("history.total",{n:(me==null?void 0:me.total)??0})}),s.jsx("button",{type:"button",className:"rounded border border-outline-variant/30 px-2 py-0.5 disabled:opacity-40",disabled:ne<=1,onClick:()=>et(c=>Math.max(1,c-1)),children:e("history.prev")}),s.jsxs("span",{children:[ne," / ",Zs]}),s.jsx("button",{type:"button",className:"rounded border border-outline-variant/30 px-2 py-0.5 disabled:opacity-40",disabled:ne>=Zs,onClick:()=>et(c=>c+1),children:e("history.next")})]}),s.jsx("ul",{className:"mt-2 space-y-2",children:(si=me==null?void 0:me.items)==null?void 0:si.map(c=>{const x=String(c.id??""),z=c.metrics,le=Mr===x;return s.jsxs("li",{className:"rounded border border-outline-variant/20 p-2 text-[11px]",children:[s.jsxs("div",{className:"flex flex-wrap items-start justify-between gap-2 font-mono",children:[s.jsxs("div",{children:[s.jsxs("div",{className:"text-on-surface",children:[hl(String(c.created_at??""),t)," —"," ",String(c.input_name)," — ",String(c.preset_label)]}),s.jsxs("div",{className:"mt-1 text-on-surface-variant",children:[e("history.preset"),": ",String(c.preset_label)," · ",e("history.metrics"),":"," ","matches=",String((z==null?void 0:z.matches)??"—")," rmse=",String((z==null?void 0:z.rmse_arcsec)??"—")]})]}),s.jsxs("div",{className:"flex shrink-0 gap-1",children:[s.jsx("button",{type:"button",className:"rounded bg-surface-container px-2 py-0.5 
text-[10px]",onClick:()=>tn(le?null:x),children:e(le?"history.collapse":"history.detail")}),s.jsx("button",{type:"button",className:"rounded px-2 py-0.5 text-[10px] text-error hover:bg-error-container/20",title:e("history.delete"),onClick:()=>void of(x),children:e("history.delete")})]})]}),le&&s.jsxs("div",{className:"mt-2 space-y-2",children:[c.asset_snapshot_relpath?s.jsx("img",{src:sh(x),alt:"",className:"max-h-48 max-w-full rounded border border-outline-variant/20 object-contain"}):null,s.jsx("pre",{className:"max-h-64 overflow-auto rounded bg-surface-container p-2 text-[10px]",children:JSON.stringify(c.result_json,null,2)})]})]},x)})})]})]})]})}function bu({result:e,t,roundTripMs:n}){const r=Qd(e??void 0);return e?s.jsxs("div",{className:"mt-2 space-y-2 text-[10px]",children:[s.jsxs("div",{className:"grid grid-cols-2 gap-2",children:[r.tBackendTotalMs!=null&&s.jsxs("div",{children:[s.jsx("div",{className:"text-on-surface-variant",children:t("lab.metric.backendTotalMs")}),s.jsxs("div",{className:"font-semibold tabular-nums text-on-surface",children:[r.tBackendTotalMs.toFixed(0)," ms"]})]}),r.tSolveMs!=null&&s.jsxs("div",{children:[s.jsx("div",{className:"text-on-surface-variant",children:t("lab.metric.solveComputeMs")}),s.jsxs("div",{className:"font-semibold tabular-nums text-on-surface",children:[r.tSolveMs.toFixed(0)," ms"]})]}),r.tOpenDecodeMs!=null&&s.jsxs("div",{children:[s.jsx("div",{className:"text-on-surface-variant",children:t("lab.metric.openDecodeMs")}),s.jsxs("div",{className:"font-semibold tabular-nums text-on-surface",children:[r.tOpenDecodeMs.toFixed(0)," ms"]})]}),r.tPreprocessMs!=null&&s.jsxs("div",{children:[s.jsx("div",{className:"text-on-surface-variant",children:t("lab.metric.preprocessMs")}),s.jsxs("div",{className:"font-semibold tabular-nums text-on-surface",children:[r.tPreprocessMs.toFixed(0)," ms"]})]}),r.tExtractMs!=null&&s.jsxs("div",{children:[s.jsx("div",{className:"text-on-surface-variant",children:t("lab.metric.extractMs")}),s.jsxs("div",{className:"font-semibold tabular-nums text-on-surface",children:[r.tExtractMs.toFixed(0)," ms"]})]}),n!=null&&s.jsxs("div",{children:[s.jsx("div",{className:"text-on-surface-variant",children:t("lab.metric.solveRoundTripMs")}),s.jsxs("div",{className:"font-semibold tabular-nums text-on-surface",children:[n.toFixed(0)," ms"]})]}),r.matches!=null&&s.jsxs("div",{children:[s.jsx("div",{className:"text-on-surface-variant",children:t("lab.metric.matches")}),s.jsx("div",{className:"font-semibold tabular-nums text-on-surface",children:r.matches})]}),r.rmseArcsec!=null&&s.jsxs("div",{children:[s.jsx("div",{className:"text-on-surface-variant",children:t("lab.metric.rmse")}),s.jsxs("div",{className:"font-semibold tabular-nums text-on-surface",children:[r.rmseArcsec.toFixed(2),"″"]})]}),r.prob!=null&&s.jsxs("div",{className:"col-span-2",children:[s.jsx("div",{className:"text-on-surface-variant",children:t("lab.metric.prob")}),s.jsx("div",{className:"font-semibold text-on-surface",children:Cn(r.prob,e).line}),s.jsx("p",{className:"mt-0.5 text-[9px] leading-snug text-on-surface-variant/90",children:t("lab.metric.probHelp")}),Cn(r.prob,e).rawLine&&s.jsxs("div",{className:"mt-1 text-[9px] text-on-surface-variant",children:[t("lab.metric.probRaw"),": ",Cn(r.prob,e).rawLine,s.jsx("p",{className:"mt-0.5 text-[8px] leading-snug opacity-90",children:t("lab.metric.probRawHelp")})]})]})]}),s.jsxs("div",{children:[s.jsx("div",{className:"text-on-surface-variant",children:t("lab.metric.radec")}),s.jsxs("div",{className:"font-mono text-[9px] 
text-on-surface",children:["α ",$l(r.raDeg)," · δ ",$l(r.decDeg)]})]}),r.status&&s.jsxs("div",{className:"rounded bg-surface-container-high px-2 py-1 text-[9px] font-mono text-on-surface",children:[t("lab.metric.status"),": ",r.status]})]}):s.jsx("p",{className:"mt-2 text-[10px] text-on-surface-variant",children:"—"})}function Ye({label:e,helpKey:t,value:n,onChange:r,type:l="number",step:o}){const{t:a}=Wd(),i=t?a(t):void 0;return s.jsxs("label",{className:"block",children:[s.jsx("span",{className:"text-[10px] font-medium text-on-surface-variant",children:e}),i&&s.jsx("p",{className:"mb-1 mt-0.5 text-[9px] leading-snug text-on-surface-variant/85",children:i}),s.jsx("input",{type:l,step:o,className:"w-full rounded bg-surface-container-highest px-2 py-1",value:n===""?"":n,onChange:u=>{const f=u.target.value;r(f===""?void 0:Number(f))}})]})}Zo.createRoot(document.getElementById("root")).render(s.jsx(Ef.StrictMode,{children:s.jsx(fh,{children:s.jsx(gh,{})})})); diff --git a/web/static/analysis-lab/assets/index-ClgTRc4R.css b/web/static/analysis-lab/assets/index-ClgTRc4R.css new file mode 100644 index 0000000..a4a8851 --- /dev/null +++ b/web/static/analysis-lab/assets/index-ClgTRc4R.css @@ -0,0 +1 @@ +*,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: ;--tw-contain-size: ;--tw-contain-layout: ;--tw-contain-paint: ;--tw-contain-style: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: ;--tw-contain-size: ;--tw-contain-layout: ;--tw-contain-paint: ;--tw-contain-style: 
}*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html,:host{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";font-feature-settings:normal;font-variation-settings:normal;-webkit-tap-highlight-color:transparent}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-feature-settings:normal;font-variation-settings:normal;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-feature-settings:inherit;font-variation-settings:inherit;font-size:100%;font-weight:inherit;line-height:inherit;letter-spacing:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,input:where([type=button]),input:where([type=reset]),input:where([type=submit]){-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]:where(:not([hidden=until-found])){display:none}.pointer-events-none{pointer-events:none}.collapse{visibility:collapse}.static{position:static}.absolute{position:absolute}.relative{position:relative}.inset-0{top:0;right:0;bottom:0;left:0}.left-0{left:0}.left-2{left:.5rem}.top-0{top:0}.top-2{top:.5rem}.z-20{z-index:20}.z-\[1\]{z-index:1}.col-span-2{grid-column:span 2 / span 2}.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-3{margin-bottom:.75rem}.mb-4{margin-bottom:1rem}.ml-auto{margin-left:auto}.mr-1{margin-right:.25rem}.mr-2{margin-right:.5rem}.mt-0\.5{margin-top:.125rem}.mt-1{margin-top:.25rem}.mt-1\.5{margin-top:.375rem}.mt-2{margin-top:.5rem}.mt-3{margin-top:.75rem}.mt-4{margin-top:1rem}.mt-6{margin-top:1.5rem}.block{display:block}.inline-block{display:inline-block}.inline{display:inline}.flex{display:flex}.inline-flex{display:inline-flex}.table{display:table}.grid{display:grid}.hidden{display:none}.aspect-video{aspect-ratio:16 / 
9}.h-11{height:2.75rem}.h-12{height:3rem}.h-3{height:.75rem}.h-3\.5{height:.875rem}.h-4{height:1rem}.h-5{height:1.25rem}.h-8{height:2rem}.h-9{height:2.25rem}.h-full{height:100%}.max-h-24{max-height:6rem}.max-h-36{max-height:9rem}.max-h-40{max-height:10rem}.max-h-48{max-height:12rem}.max-h-64{max-height:16rem}.max-h-\[70vh\]{max-height:70vh}.max-h-\[min\(22rem\,55vh\)\]{max-height:min(22rem,55vh)}.max-h-\[min\(50vh\,28rem\)\]{max-height:min(50vh,28rem)}.min-h-0{min-height:0px}.min-h-\[120px\]{min-height:120px}.min-h-\[200px\]{min-height:200px}.min-h-\[220px\]{min-height:220px}.min-h-full{min-height:100%}.w-11{width:2.75rem}.w-16{width:4rem}.w-3{width:.75rem}.w-3\.5{width:.875rem}.w-4{width:1rem}.w-5{width:1.25rem}.w-64{width:16rem}.w-8{width:2rem}.w-80{width:20rem}.w-full{width:100%}.min-w-0{min-width:0px}.min-w-\[15rem\]{min-width:15rem}.min-w-\[16rem\]{min-width:16rem}.min-w-\[280px\]{min-width:280px}.max-w-5xl{max-width:64rem}.max-w-full{max-width:100%}.max-w-md{max-width:28rem}.max-w-sm{max-width:24rem}.flex-1{flex:1 1 0%}.shrink-0{flex-shrink:0}.origin-top-left{transform-origin:top left}.transform{transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}@keyframes spin{to{transform:rotate(360deg)}}.animate-spin{animation:spin 1s linear infinite}.cursor-pointer{cursor:pointer}.list-none{list-style-type:none}.grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.flex-col{flex-direction:column}.flex-wrap{flex-wrap:wrap}.items-start{align-items:flex-start}.items-center{align-items:center}.justify-center{justify-content:center}.justify-between{justify-content:space-between}.gap-0\.5{gap:.125rem}.gap-1{gap:.25rem}.gap-2{gap:.5rem}.gap-3{gap:.75rem}.gap-4{gap:1rem}.gap-6{gap:1.5rem}.gap-x-2{-moz-column-gap:.5rem;column-gap:.5rem}.gap-x-4{-moz-column-gap:1rem;column-gap:1rem}.gap-y-0\.5{row-gap:.125rem}.gap-y-1{row-gap:.25rem}.gap-y-2{row-gap:.5rem}.space-y-0\.5>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(.125rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.125rem * var(--tw-space-y-reverse))}.space-y-1>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(.25rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.25rem * var(--tw-space-y-reverse))}.space-y-1\.5>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(.375rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.375rem * var(--tw-space-y-reverse))}.space-y-2>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(.5rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.5rem * 
var(--tw-space-y-reverse))}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-x-auto{overflow-x:auto}.overflow-y-auto{overflow-y:auto}.truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.rounded{border-radius:.25rem}.rounded-lg{border-radius:.5rem}.rounded-md{border-radius:.375rem}.border{border-width:1px}.border-b{border-bottom-width:1px}.border-l{border-left-width:1px}.border-r{border-right-width:1px}.border-t{border-top-width:1px}.border-error\/40{border-color:#ffb4ab66}.border-outline-variant\/10{border-color:#4147541a}.border-outline-variant\/15{border-color:#41475426}.border-outline-variant\/20{border-color:#41475433}.border-outline-variant\/25{border-color:#41475440}.border-outline-variant\/30{border-color:#4147544d}.border-outline-variant\/40{border-color:#41475466}.border-primary{--tw-border-opacity: 1;border-color:rgb(173 198 255 / var(--tw-border-opacity, 1))}.border-white\/30{border-color:#ffffff4d}.bg-black{--tw-bg-opacity: 1;background-color:rgb(0 0 0 / var(--tw-bg-opacity, 1))}.bg-black\/40{background-color:#0006}.bg-error-container\/20{background-color:#93000a33}.bg-error\/80{background-color:#ffb4abcc}.bg-primary{--tw-bg-opacity: 1;background-color:rgb(173 198 255 / var(--tw-bg-opacity, 1))}.bg-primary-container{--tw-bg-opacity: 1;background-color:rgb(76 142 255 / var(--tw-bg-opacity, 1))}.bg-primary-container\/20{background-color:#4c8eff33}.bg-primary-container\/30{background-color:#4c8eff4d}.bg-primary-container\/50{background-color:#4c8eff80}.bg-primary\/20{background-color:#adc6ff33}.bg-secondary\/80{background-color:#40e56ccc}.bg-surface{--tw-bg-opacity: 1;background-color:rgb(16 19 26 / var(--tw-bg-opacity, 1))}.bg-surface-container{--tw-bg-opacity: 1;background-color:rgb(29 32 38 / var(--tw-bg-opacity, 1))}.bg-surface-container-high{--tw-bg-opacity: 1;background-color:rgb(39 42 49 / var(--tw-bg-opacity, 1))}.bg-surface-container-highest{--tw-bg-opacity: 1;background-color:rgb(50 53 60 / var(--tw-bg-opacity, 1))}.bg-surface-container-highest\/30{background-color:#32353c4d}.bg-surface-container-highest\/40{background-color:#32353c66}.bg-surface-container-low{--tw-bg-opacity: 1;background-color:rgb(25 28 34 / var(--tw-bg-opacity, 1))}.bg-surface-container-low\/80{background-color:#191c22cc}.bg-surface-container-lowest{--tw-bg-opacity: 1;background-color:rgb(11 14 20 / var(--tw-bg-opacity, 1))}.bg-surface-container-lowest\/90{background-color:#0b0e14e6}.bg-surface-container-lowest\/95{background-color:#0b0e14f2}.object-contain{-o-object-fit:contain;object-fit:contain}.object-cover{-o-object-fit:cover;object-fit:cover}.p-1{padding:.25rem}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-4{padding:1rem}.px-1{padding-left:.25rem;padding-right:.25rem}.px-2{padding-left:.5rem;padding-right:.5rem}.px-3{padding-left:.75rem;padding-right:.75rem}.px-4{padding-left:1rem;padding-right:1rem}.py-0\.5{padding-top:.125rem;padding-bottom:.125rem}.py-1{padding-top:.25rem;padding-bottom:.25rem}.py-1\.5{padding-top:.375rem;padding-bottom:.375rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.py-2\.5{padding-top:.625rem;padding-bottom:.625rem}.pb-2{padding-bottom:.5rem}.pb-3{padding-bottom:.75rem}.pr-0\.5{padding-right:.125rem}.pt-1{padding-top:.25rem}.pt-2{padding-top:.5rem}.pt-3{padding-top:.75rem}.pt-4{padding-top:1rem}.text-left{text-align:left}.text-center{text-align:center}.font-headline{font-family:system-ui,Segoe UI,sans-serif}.font-mono{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier 
New,monospace}.text-\[10px\]{font-size:10px}.text-\[11px\]{font-size:11px}.text-\[8px\]{font-size:8px}.text-\[9px\]{font-size:9px}.text-lg{font-size:1.125rem;line-height:1.75rem}.text-sm{font-size:.875rem;line-height:1.25rem}.text-xs{font-size:.75rem;line-height:1rem}.font-bold{font-weight:700}.font-medium{font-weight:500}.font-semibold{font-weight:600}.uppercase{text-transform:uppercase}.normal-case{text-transform:none}.tabular-nums{--tw-numeric-spacing: tabular-nums;font-variant-numeric:var(--tw-ordinal) var(--tw-slashed-zero) var(--tw-numeric-figure) var(--tw-numeric-spacing) var(--tw-numeric-fraction)}.leading-relaxed{line-height:1.625}.leading-snug{line-height:1.375}.leading-tight{line-height:1.25}.tracking-wide{letter-spacing:.025em}.text-error{--tw-text-opacity: 1;color:rgb(255 180 171 / var(--tw-text-opacity, 1))}.text-on-primary-container{--tw-text-opacity: 1;color:rgb(0 40 93 / var(--tw-text-opacity, 1))}.text-on-surface{--tw-text-opacity: 1;color:rgb(225 226 235 / var(--tw-text-opacity, 1))}.text-on-surface-variant{--tw-text-opacity: 1;color:rgb(194 198 214 / var(--tw-text-opacity, 1))}.text-on-surface-variant\/85{color:#c2c6d6d9}.text-on-surface-variant\/90{color:#c2c6d6e6}.text-primary{--tw-text-opacity: 1;color:rgb(173 198 255 / var(--tw-text-opacity, 1))}.text-secondary{--tw-text-opacity: 1;color:rgb(64 229 108 / var(--tw-text-opacity, 1))}.text-white{--tw-text-opacity: 1;color:rgb(255 255 255 / var(--tw-text-opacity, 1))}.antialiased{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.accent-primary{accent-color:#adc6ff}.opacity-90{opacity:.9}.shadow-sm{--tw-shadow: 0 1px 2px 0 rgb(0 0 0 / .05);--tw-shadow-colored: 0 1px 2px 0 var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.filter{filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.transition-colors{transition-property:color,background-color,border-color,text-decoration-color,fill,stroke;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}.transition-transform{transition-property:transform;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}html,body,#root{height:100%}.hover\:bg-error-container\/20:hover{background-color:#93000a33}.hover\:bg-error-container\/30:hover{background-color:#93000a4d}.hover\:bg-surface-container:hover{--tw-bg-opacity: 1;background-color:rgb(29 32 38 / var(--tw-bg-opacity, 1))}.hover\:bg-surface-container-high:hover{--tw-bg-opacity: 1;background-color:rgb(39 42 49 / var(--tw-bg-opacity, 1))}.hover\:text-error:hover{--tw-text-opacity: 1;color:rgb(255 180 171 / var(--tw-text-opacity, 1))}.hover\:text-on-surface:hover{--tw-text-opacity: 1;color:rgb(225 226 235 / var(--tw-text-opacity, 1))}.hover\:underline:hover{text-decoration-line:underline}.disabled\:opacity-40:disabled{opacity:.4}@media (min-width: 640px){.sm\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.sm\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}}.\[\&\:\:-webkit-details-marker\]\:hidden::-webkit-details-marker{display:none} diff --git a/web/static/analysis-lab/index.html b/web/static/analysis-lab/index.html new file mode 100644 index 0000000..e75f3b8 --- /dev/null +++ b/web/static/analysis-lab/index.html @@ -0,0 +1,13 @@ + + + + + + OGScope 星空解算控制台 + + + + +
+ + diff --git a/web/static/css/debug-analysis.css b/web/static/css/debug-analysis.css index ee81488..f9cf257 100644 --- a/web/static/css/debug-analysis.css +++ b/web/static/css/debug-analysis.css @@ -6,9 +6,10 @@ body { } .page { - max-width: 1200px; + max-width: min(1680px, calc(100% - 32px)); margin: 0 auto; padding: 16px; + box-sizing: border-box; } .topbar { @@ -31,10 +32,28 @@ body { .main-grid { display: grid; - grid-template-columns: repeat(auto-fit, minmax(320px, 1fr)); + grid-template-columns: 1fr; + gap: 16px; + align-items: start; +} + +.layout-workflow { + grid-column: 1 / -1; + display: grid; + grid-template-columns: 1fr; gap: 16px; } +@media (min-width: 1100px) { + .layout-workflow { + grid-template-columns: 1fr 1fr; + } + + .card-upload { + grid-column: 1 / -1; + } +} + .card { background: #1f2937; border: 1px solid #374151; @@ -73,6 +92,22 @@ select { margin: 10px 0 12px; } +/* 保证操作区在预览/叠加层之上可点 / Keep action buttons above preview overlay hit-testing */ +.card-upload { + position: relative; + z-index: 1; +} + +.card-upload .actions { + position: relative; + z-index: 2; +} + +.card-upload .request-progress { + position: relative; + z-index: 2; +} + .btn { border: 1px solid #4b5563; background: #374151; @@ -111,3 +146,233 @@ select { font-size: 12px; line-height: 1.45; } + +.help-text { + font-size: 0.88rem; + line-height: 1.5; + color: #cbd5e1; + margin: 0 0 10px; +} + +.help-text code { + background: #0b1220; + padding: 1px 6px; + border-radius: 4px; + font-size: 0.85em; +} + +.stream-preview { + margin-top: 10px; +} + +.stream-container { + margin-top: 8px; +} + +.stream-container img { + border-radius: 8px; + border: 1px solid #374151; + max-width: 100%; +} + +/* 解算原图叠加 / Solve preview overlay */ +.solve-preview-toolbar { + display: flex; + flex-wrap: wrap; + align-items: center; + gap: 10px 16px; + margin: 10px 0 8px; + font-size: 0.88rem; +} + +.solve-preview-label { + color: #9ca3af; +} + +.solve-preview-wrap { + position: relative; + z-index: 0; + display: inline-block; + max-width: 100%; + margin-bottom: 12px; + vertical-align: top; +} + +.solve-preview-wrap:not([hidden]) { + display: inline-block; +} + +.solve-preview-img { + display: block; + max-width: 100%; + height: auto; + border-radius: 8px; + border: 1px solid #374151; +} + +.solve-preview-canvas { + position: absolute; + left: 0; + top: 0; + pointer-events: none; + border-radius: 8px; +} + +.solve-overlay-panel { + position: absolute; + top: 8px; + left: 8px; + max-width: min(92%, 360px); + padding: 8px 10px; + border-radius: 8px; + background: rgba(15, 23, 42, 0.82); + border: 1px solid rgba(55, 65, 81, 0.9); + font-size: 11px; + line-height: 1.45; + color: #e5e7eb; + pointer-events: none; +} + +.solve-overlay-panel dl { + margin: 0; + display: grid; + grid-template-columns: auto 1fr; + gap: 2px 10px; +} + +.solve-overlay-panel dt { + color: #9ca3af; + font-weight: normal; +} + +.solve-overlay-panel dd { + margin: 0; + word-break: break-word; +} + +.card-realtime { + grid-column: 1 / -1; +} + +.server-upload-controls { + display: flex; + flex-wrap: wrap; + gap: 8px; + align-items: center; + grid-column: 2; +} + +.server-upload-select { + width: auto; + min-width: 200px; + flex: 1 1 200px; +} + +.upload-dir-hint { + font-size: 0.82rem; + color: #9ca3af; + margin-top: 4px; +} + +.solve-hint-inline { + margin: 8px 0 0; +} + +.request-progress { + margin: 10px 0; +} + +.request-progress-label { + display: block; + font-size: 0.82rem; + color: #9ca3af; + margin-bottom: 6px; +} + +.request-progress-track { + 
height: 4px; + border-radius: 999px; + background: #374151; + overflow: hidden; +} + +.request-progress-indeterminate { + height: 100%; + width: 40%; + background: linear-gradient(90deg, #2563eb, #93c5fd, #2563eb); + animation: analysis-progress-slide 1s linear infinite; +} + +@keyframes analysis-progress-slide { + 0% { + transform: translateX(-100%); + } + 100% { + transform: translateX(350%); + } +} + +.json-details { + margin-top: 12px; +} + +.json-details summary { + cursor: pointer; + color: #93c5fd; + font-size: 0.9rem; + user-select: none; +} + +.json-details .output--json { + margin-top: 8px; +} + +/* Tetra3 提星调参 / Centroid tuning */ +.centroid-scope-hint { + margin-top: 0; +} + +.centroid-details summary { + cursor: pointer; + color: #93c5fd; + font-size: 0.9rem; + margin-bottom: 8px; + user-select: none; +} + +.centroid-grid { + display: grid; + gap: 8px; + margin-bottom: 12px; +} + +@media (min-width: 700px) { + .centroid-grid { + grid-template-columns: 1fr 1fr; + } +} + +.centroid-grid .row-checkbox { + display: flex; + align-items: center; + gap: 10px; +} + +.centroid-actions { + display: flex; + flex-wrap: wrap; + gap: 8px; + margin-bottom: 12px; +} + +.centroid-preview-wrap { + margin-top: 8px; +} + +.centroid-mask-img { + max-width: 100%; + height: auto; + border: 1px solid #374151; + border-radius: 4px; + background: #1f2937; +} diff --git a/web/static/css/hud-home.css b/web/static/css/hud-home.css new file mode 100644 index 0000000..fa46548 --- /dev/null +++ b/web/static/css/hud-home.css @@ -0,0 +1,223 @@ +/* OGScope HUD 主页辅助样式 / HUD home supplemental styles (used with Tailwind) */ + +/* 禁止整页滚动,保证 HUD 铺满视口 / Lock viewport, no page scroll */ +html, +body { + height: 100%; + max-height: 100dvh; + overflow: hidden; + overscroll-behavior: none; +} + +/* 布局标尺:左缩放栏、右工具+模式栏、顶栏、底栏 / Layout tokens */ +#app { + --hud-left-rail: 5rem; + --hud-right-rail: 5rem; + /* 顶栏内容区高度(不含安全区)/ Header content height excluding safe area */ + --hud-header-h: 2.625rem; + --hud-footer-h: 3.25rem; +} + +@media (min-width: 640px) { + #app { + --hud-right-rail: 6rem; + } +} + +/* 兼容旧变量名 / Back-compat aliases */ +#app { + --hud-rail: var(--hud-left-rail); + --hud-right: var(--hud-right-rail); +} + +/* 顶栏品牌:OGScope 与版本号同一行垂直居中 / Brand title: name + version vertically centered on one line */ +.hud-brand-title { + line-height: 1; +} + +/* 版本号相对主标题微调纵向位置(略上移,避免偏低)/ Fine-tune version vertical offset vs OGScope */ +.hud-brand-title .hud-brand-version { + position: relative; + top: -0.05rem; +} + +@media (min-width: 640px) { + .hud-brand-title .hud-brand-version { + top: -0.0625rem; + } +} + +/* 顶栏遥测:可横向滚动但不显示滚动条 / Telemetry strip scrolls without visible scrollbar */ +.hud-telemetry-strip { + scrollbar-width: none; + -ms-overflow-style: none; +} + +.hud-telemetry-strip::-webkit-scrollbar { + display: none; +} + +.material-symbols-outlined { + font-variation-settings: "FILL" 0, "wght" 400, "GRAD" 0, "opsz" 24; + display: inline-block; + vertical-align: middle; +} + +/* 电量与百分比同一行对齐 / Align battery icon with percentage */ +.hud-battery-wrap { + display: inline-flex; + align-items: center; + gap: 0.1875rem; +} + +/* 横向电池图标(battery_horiz_*),与数字垂直居中、避免占位 tofu / Horizontal battery, centered with label */ +.hud-battery-wrap .hud-battery-icon { + display: inline-flex; + flex-shrink: 0; + align-items: center; + justify-content: center; + width: 1.375rem; + height: 0.875rem; + font-size: 1.125rem; + line-height: 1; + font-variation-settings: "FILL" 0, "wght" 400, "GRAD" 0, "opsz" 20; + vertical-align: middle; +} + 
+.hud-scanline { + background: linear-gradient(to bottom, transparent 50%, rgba(255, 85, 64, 0.05) 50%); + background-size: 100% 4px; +} + +.glow-red { + text-shadow: 0 0 8px rgba(255, 85, 64, 0.6); +} + +/* 已弃用竖排「模式」标题,改横排;保留类名以免外部引用 / Legacy vertical label unused */ +.hud-vertical-label { + writing-mode: horizontal-tb; + transform: none; +} + +/* 中央视口:在 main 内竖向占满 / Viewport fills main height */ +.hud-viewport-host { + display: flex; + flex: 1 1 0%; + min-height: 0; + flex-direction: column; + width: 100%; +} + +.hud-viewport-center { + display: flex; + flex: 1 1 0%; + min-height: 0; + width: 100%; + /* 全屏铺满时沿交叉轴拉满 / Stretch to full width for full-bleed */ + align-items: stretch; + overflow: hidden; +} + +.hud-viewport-frame { + box-sizing: border-box; + flex: 1 1 auto; + min-height: 0; + /* 铺满物理视口(顶底栏悬浮,不挤压主画面)/ Full-bleed under floating chrome */ + height: 100%; + width: 100%; + max-width: none; + max-height: none; + aspect-ratio: auto; +} + +/* 模式列表:沿右侧栏向画面内侧飞出;fixed + JS 定位以叠于底栏之上 / Flyout uses fixed + JS to sit above footer */ +.hud-mode-flyout { + position: fixed; + width: max(var(--hud-right-rail), 5rem); + z-index: 55; + max-height: min(65vh, calc(100dvh - env(safe-area-inset-top, 0px) - env(safe-area-inset-bottom, 0px) - 3rem)); +} + +/* 加载屏与主应用显隐 / Loading screen & app visibility (replaces core/layout + components for this page) */ +#loading-screen { + position: fixed; + inset: 0; + z-index: 10000; + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + background: linear-gradient(135deg, #1b0b0a 0%, #0e0e0e 100%); + transition: opacity 0.5s ease-out; +} + +#loading-screen.hidden { + opacity: 0; + pointer-events: none; +} + +#app { + position: fixed; + inset: 0; + width: 100%; + height: 100dvh; + max-height: 100dvh; + overflow: hidden; + display: flex; + flex-direction: column; + opacity: 0; + transition: opacity 0.5s ease-in; +} + +#app.loaded { + opacity: 1; +} + +/* 视频填满 16:9 容器 / Video fills aspect-ratio container */ +#video-stream { + display: block; + width: 100%; + height: 100%; + object-fit: cover; + transform-origin: center center; +} + +/* 目标引导线(与 app.js updateGuideLine 配合)/ Guide line for JS rotation */ +.guide-line { + position: absolute; + top: 50%; + left: 50%; + width: 2px; + height: 80px; + background: linear-gradient( + 180deg, + rgba(255, 180, 168, 0) 0%, + rgba(255, 180, 168, 0.85) 100% + ); + transform-origin: top center; + transform: translate(-50%, 0) rotate(45deg); + pointer-events: none; + animation: guidePulse 2s ease-in-out infinite; +} + +@keyframes guidePulse { + 0%, + 100% { + opacity: 0.7; + } + 50% { + opacity: 1; + } +} + +/* 右侧模式栏高亮:左边框 / Mode rail on right: active strip on inner (left) edge */ +.mode-button { + border-left: 2px solid transparent; + border-right: none; +} + +.mode-button.active { + background-color: rgba(127, 29, 29, 0.4); + color: rgb(248 113 113); + border-left-color: rgb(220 38 38); +} diff --git a/web/static/i18n/analysis.en.json b/web/static/i18n/analysis.en.json new file mode 100644 index 0000000..d542658 --- /dev/null +++ b/web/static/i18n/analysis.en.json @@ -0,0 +1,153 @@ +{ + "app.title": "OGScope Plate Solve Console", + "nav.lab": "Lab", + "nav.labImage": "Image solve", + "nav.labVideo": "Video solve", + "delete.uploadCascade": "Delete {n} linked experiment record(s) as well?", + "lab.solveCurrentFrame": "Solve current frame (file)", + "lab.cameraPreviewLoading": "Connecting to shared preview…", + "lab.solveCameraFrame": "Solve live camera frame", + "lab.solveCameraStart": "Start 
camera solve", + "lab.solveCameraStop": "Stop camera solve", + "lab.videoPreviewFailed": "This video format may be unsupported by the browser. Try MP4 (H.264) or WebM.", + "lab.previewModeFile": "Pool file", + "lab.previewModeCamera": "Device camera", + "lab.videoLiveIntro": "Shares the same camera as Camera Debug — preview and solve live frames here without opening debug. Both pages can run together.", + "lab.cameraSnapshotName": "ogscope_camera_live", + "lab.metric.probRaw": "Raw Prob", + "lab.systemLoad": "System load", + "results.saveBatchAll": "Save all to records", + "nav.pool": "Assets", + "nav.history": "Records", + "nav.cameraDebug": "Camera Debug", + "nav.home": "Home", + "lang.zh": "中文", + "lang.en": "EN", + "sidebar.assets": "Uploaded assets", + "sidebar.upload": "Upload", + "sidebar.refresh": "Refresh", + "sidebar.debugCaptures": "Debug console media", + "sidebar.assetTypeImage": "Image", + "sidebar.assetTypeVideo": "Video", + "sidebar.debugEmpty": "No debug files", + "sidebar.importToPool": "Import to pool", + "sidebar.debugPage": "Page {cur} / {total}", + "sidebar.batchPresets": "Batch presets", + "sidebar.batchHint": "Check presets, then use Batch solve to compare multiple param sets.", + "lab.selectOrUpload": "Pick an uploaded or imported asset from the left", + "lab.selectOrUploadVideo": "Pick a pool video to preview, or use the button above for the live camera frame.", + "lab.file": "File", + "lab.source": "Source", + "lab.layers": "Layers", + "lab.layer.matched": "Matched", + "lab.layer.pattern": "Pattern", + "lab.layer.all": "All centroids", + "lab.grid": "Grid", + "lab.zoomIn": "Zoom in", + "lab.zoomOut": "Zoom out", + "lab.zoomReset": "Reset", + "lab.resolution": "Resolution", + "lab.fwhm": "FWHM", + "lab.starsDetected": "Stars detected", + "lab.meta.title": "Capture & file info", + "lab.meta.noSidecar": "No sidecar (not from debug capture)", + "lab.meta.partial": "No detailed sidecar; file info only.", + "lab.solveSection": "Solve", + "lab.imageSection": "Image", + "lab.metric.solveMs": "Time", + "lab.metric.solveComputeMs": "Solve compute", + "lab.metric.solveComputeHelp": "Server-side Tetra3 + star extraction only (no network).", + "lab.metric.solveRoundTripMs": "End-to-end", + "lab.metric.solveRoundTripHelp": "From request start to UI updated: network + JSON + render.", + "lab.metric.backendTotalMs": "Backend total", + "lab.metric.openDecodeMs": "Open/decode", + "lab.metric.preprocessMs": "Preprocess", + "lab.metric.extractMs": "Extract", + "lab.metric.solveOnlyMs": "Solve match", + "lab.metric.probHelp": "Solver confidence (0–1); higher means a more trustworthy plate match.", + "lab.metric.probRawHelp": "Raw Tetra3 Prob (e.g. log-likelihood); compare with the normalized line above.", + "lab.metric.radec": "RA / Dec", + "lab.metric.matches": "Matches", + "lab.metric.rmse": "RMSE", + "lab.metric.prob": "Prob.", + "lab.metric.status": "Status", + "meta.exposure": "Exposure", + "meta.gain": "Gain", + "meta.fps": "FPS", + "meta.sensor": "Sensor", + "meta.colorMode": "Color", + "meta.outputResolution": "Output size", + "meta.fileTime": "File time", + "meta.fileSize": "File size", + "results.viewRaw": "Raw JSON", + "results.hideRaw": "Hide", + "params.title": "Solve parameters", + "params.blockSolveIntro": "Plate-solve (Tetra3): FOV, timeout, and coarse sky hints. 
FOV should match your lens.", + "params.centroid": "Star detection", + "params.blockCentroidIntro": "Star detection: threshold, blob area, and local background window for centroids.", + "params.fov": "FOV estimate (°)", + "params.fovHelp": "Horizontal field of view for lost-in-space solve.", + "params.fovErr": "FOV max error (°)", + "params.fovErrHelp": "Search range around estimated FOV.", + "params.timeout": "Timeout (ms)", + "params.timeoutHelp": "Max wait time per solve.", + "params.solveProfile": "Solve profile", + "params.solveProfileHelp": "Speed/Balanced/Robust tune timeout, centroid thresholds, and matching star count together.", + "params.solveProfileSpeed": "Speed first", + "params.solveProfileBalanced": "Balanced", + "params.solveProfileRobust": "Robust first", + "params.ra": "RA hint (°)", + "params.raHelp": "Approximate right ascension in degrees.", + "params.dec": "Dec hint (°)", + "params.decHelp": "Approximate declination in degrees.", + "params.maxSide": "Max long side before extract (px)", + "params.maxSideHelp": "Downscale long edge for faster centroid extraction.", + "params.detailLevelFull": "Include full Tetra3 raw block (larger payload, for debugging only).", + "params.largeScaleBg": "Large-scale background flattening", + "params.largeScaleBgHelp": "Before centroiding, estimate a low-frequency background on a downscaled image and correct uneven illumination (e.g. corner glow). Off by default to match legacy behavior.", + "params.sigma": "σ threshold", + "params.sigmaHelp": "Multiplier over background noise for star candidates.", + "params.maxArea": "max_area", + "params.maxAreaHelp": "Max connected component area in pixels.", + "params.minArea": "min_area", + "params.minAreaHelp": "Min connected component area in pixels.", + "params.filtsize": "filtsize (odd)", + "params.filtsizeHelp": "Local filter window size, must be odd.", + "btn.solveOne": "Solve once", + "btn.solveBatch": "Batch solve (presets)", + "btn.applyPresets": "Apply preset to form", + "btn.savePreset": "Save", + "placeholder.newPreset": "New preset name", + "pool.title": "Server asset pool", + "pool.col.name": "Filename", + "pool.col.source": "Source", + "pool.col.size": "Size", + "pool.col.time": "Modified", + "pool.delete": "Delete", + "history.title": "Experiment records", + "history.intro": "Saved solve snapshots from the Lab. After a solve, use Save to records in the Lab main panel (Result comparison), or Save on each batch result card. Search by filename or preset; export JSON/CSV for backup.", + "history.search": "Search…", + "history.searchBtn": "Search", + "history.exportJson": "Export JSON", + "history.exportCsv": "Export CSV", + "history.total": "Total {n}", + "history.preset": "Preset", + "history.metrics": "Metrics", + "history.detail": "Details", + "history.collapse": "Collapse", + "history.prev": "Prev", + "history.next": "Next", + "history.delete": "Delete", + "delete.uploadFirst": "Delete \"{name}\" from the asset pool?", + "delete.uploadSecond": "This cannot be undone. Confirm again?", + "delete.experimentFirst": "Delete this experiment record?", + "delete.experimentSecond": "This cannot be undone. 
Confirm again?", + "results.title": "Results", + "results.saveCurrent": "Save to records", + "results.saveRow": "Save", + "results.expand": "Expand", + "results.collapseJson": "Collapse", + "err.selectFile": "Select a file", + "err.selectPresets": "Select at least one preset", + "common.placeholder": "—" +} diff --git a/web/static/i18n/analysis.zh.json b/web/static/i18n/analysis.zh.json new file mode 100644 index 0000000..be43e88 --- /dev/null +++ b/web/static/i18n/analysis.zh.json @@ -0,0 +1,153 @@ +{ + "app.title": "OGScope 星空解算控制台", + "nav.lab": "解算台", + "nav.labImage": "图片解算", + "nav.labVideo": "视频解算", + "nav.pool": "素材池", + "nav.history": "实验记录", + "nav.cameraDebug": "相机调试控制台", + "nav.home": "首页", + "lang.zh": "中文", + "lang.en": "EN", + "sidebar.assets": "自行上传素材", + "sidebar.upload": "上传文件", + "sidebar.refresh": "刷新列表", + "sidebar.debugCaptures": "调试控制台素材", + "sidebar.assetTypeImage": "图片", + "sidebar.assetTypeVideo": "视频", + "sidebar.debugEmpty": "暂无调试文件", + "sidebar.importToPool": "导入到素材池", + "sidebar.debugPage": "第 {cur} / {total} 页", + "sidebar.batchPresets": "批量预设", + "sidebar.batchHint": "勾选后点击「批量解算」可一次用多组参数对比结果。", + "lab.selectOrUpload": "从左侧选择自行上传或已导入的素材", + "lab.selectOrUploadVideo": "从左侧选择视频素材预览;或使用上方按钮解算设备相机实时帧。", + "lab.file": "文件", + "lab.source": "来源", + "lab.layers": "叠加层", + "lab.layer.matched": "匹配星", + "lab.layer.pattern": "图案星", + "lab.layer.all": "全部质心", + "lab.grid": "网格", + "lab.zoomIn": "放大", + "lab.zoomOut": "缩小", + "lab.zoomReset": "复位", + "lab.resolution": "分辨率", + "lab.fwhm": "FWHM", + "lab.starsDetected": "检测星点", + "lab.meta.title": "拍摄与文件信息", + "lab.meta.noSidecar": "无侧车信息(非调试采集或仅本地上传)", + "lab.meta.partial": "暂无侧车详细字段,仅显示文件信息。", + "lab.solveSection": "解算", + "lab.imageSection": "图像", + "lab.metric.solveMs": "用时", + "lab.metric.solveComputeMs": "解算计算用时", + "lab.metric.solveComputeHelp": "服务端 Tetra3 与提星等纯计算耗时(与网络无关)。", + "lab.metric.solveRoundTripMs": "全链路用时", + "lab.metric.solveRoundTripHelp": "从本页发起请求到收到结果并完成界面刷新的总耗时,含网络往返与浏览器渲染。", + "lab.metric.backendTotalMs": "后端总用时", + "lab.metric.openDecodeMs": "读取/解码", + "lab.metric.preprocessMs": "预处理", + "lab.metric.extractMs": "提星", + "lab.metric.solveOnlyMs": "匹配解算", + "lab.metric.probHelp": "由解算器给出的匹配置信度(0–1),越高表示星图与天区匹配越可信。", + "lab.metric.probRawHelp": "Tetra3 返回的原始 Prob 字段,可能为对数似然等内部量;与上一行换算后的百分比对照查看即可。", + "lab.metric.radec": "RA / Dec", + "lab.metric.matches": "匹配", + "lab.metric.rmse": "RMSE", + "lab.metric.prob": "置信", + "lab.metric.status": "状态", + "meta.exposure": "曝光", + "meta.gain": "增益", + "meta.fps": "帧率", + "meta.sensor": "传感器", + "meta.colorMode": "色彩", + "meta.outputResolution": "输出分辨率", + "meta.fileTime": "文件时间", + "meta.fileSize": "文件大小", + "results.viewRaw": "原始 JSON", + "results.hideRaw": "收起", + "params.title": "解算参数", + "params.blockSolveIntro": "以下为板块求解(Tetra3)搜索天区、超时与粗略指向提示;FOV 需与镜头视场大致一致。", + "params.centroid": "提星", + "params.blockCentroidIntro": "以下为星点检测:阈值、连通域面积与局部背景窗口,用于从图像中提取星点质心。", + "params.fov": "FOV 估计 (°)", + "params.fovHelp": "水平视场角估计值,用于 lost-in-space 解算。", + "params.fovErr": "FOV 允许误差 (°)", + "params.fovErrHelp": "允许 Tetra3 在估计 FOV 附近的搜索范围。", + "params.timeout": "超时 (ms)", + "params.timeoutHelp": "单次解算最长等待时间。", + "params.solveProfile": "解算档位", + "params.solveProfileHelp": "速度/平衡/稳健三档会同时调整超时、提星阈值与参与匹配星点数。", + "params.solveProfileSpeed": "速度优先", + "params.solveProfileBalanced": "平衡", + "params.solveProfileRobust": "稳健优先", + "params.ra": "RA 提示 (°)", + "params.raHelp": "大致天球赤经,缩小搜索范围(度)。", + "params.dec": "Dec 提示 (°)", + "params.decHelp": "大致天球赤纬(度)。", + "params.maxSide": 
"提星前长边上界 (px)", + "params.maxSideHelp": "降采样长边上限,大图可加速提星。", + "params.detailLevelFull": "包含完整 Tetra3 原始结果(体积略大,仅调试时开启)", + "params.largeScaleBg": "大尺度背景减除", + "params.largeScaleBgHelp": "在提星前用低分辨率平滑估计并校正大尺度亮度不均,可减轻角部光晕导致的假星;默认关闭以保持与过往行为一致。", + "params.sigma": "σ(阈值倍数)", + "params.sigmaHelp": "高于背景噪声倍数的区域视为星点候选。", + "params.maxArea": "max_area", + "params.maxAreaHelp": "连通域最大像素面积。", + "params.minArea": "min_area", + "params.minAreaHelp": "连通域最小像素面积。", + "params.filtsize": "filtsize(奇数)", + "params.filtsizeHelp": "局部背景滤波窗口边长,须为奇数。", + "btn.solveOne": "单张解算", + "btn.solveBatch": "批量解算(勾选预设)", + "btn.applyPresets": "应用预设到表单", + "btn.savePreset": "保存", + "placeholder.newPreset": "新预设名称", + "pool.title": "服务器素材池", + "pool.col.name": "文件名", + "pool.col.source": "来源", + "pool.col.size": "大小", + "pool.col.time": "修改时间", + "pool.delete": "删除", + "history.title": "实验记录", + "history.intro": "此处展示你在解算台完成解算后手动保存的快照。用法:在「解算台」主栏「结果对比」中,单张解算后点「保存当前到实验记录」,或批量解算后在某张结果卡片上点「保存记录」。本页可按文件名或预设名搜索,支持导出 JSON/CSV 备份。", + "history.search": "搜索…", + "history.searchBtn": "搜索", + "history.exportJson": "导出 JSON", + "history.exportCsv": "导出 CSV", + "history.total": "共 {n} 条", + "history.preset": "预设", + "history.metrics": "指标", + "history.detail": "详情", + "history.collapse": "收起", + "history.prev": "上一页", + "history.next": "下一页", + "history.delete": "删除", + "delete.uploadFirst": "确定要从素材池删除「{name}」吗?", + "delete.uploadSecond": "此操作不可恢复,再次确认删除?", + "delete.experimentFirst": "确定要删除这条实验记录吗?", + "delete.experimentSecond": "此操作不可恢复,再次确认删除?", + "results.title": "结果对比", + "results.saveCurrent": "保存当前到实验记录", + "results.saveRow": "保存记录", + "results.expand": "展开", + "results.collapseJson": "收起", + "err.selectFile": "请选择素材", + "err.selectPresets": "请勾选至少一个预设", + "common.placeholder": "—", + "delete.uploadCascade": "该素材有 {n} 条实验记录,是否一并删除?", + "lab.solveCurrentFrame": "解算当前帧(文件)", + "lab.cameraPreviewLoading": "正在连接共享预览…", + "lab.solveCameraFrame": "解算相机当前帧", + "lab.solveCameraStart": "开始相机解算", + "lab.solveCameraStop": "停止相机解算", + "lab.videoPreviewFailed": "该视频格式可能不受浏览器支持,建议使用 MP4(H.264) 或 WebM。", + "lab.previewModeFile": "素材文件", + "lab.previewModeCamera": "设备相机", + "lab.videoLiveIntro": "与调试控制台共用同一相机;此处可预览并解算实时帧,无需单独打开调试页。两页可同时使用。", + "lab.cameraSnapshotName": "ogscope_camera_live", + "lab.metric.probRaw": "原始 Prob", + "lab.systemLoad": "系统负载", + "results.saveBatchAll": "保存全部到实验记录" +} diff --git a/web/static/i18n/debug.en.json b/web/static/i18n/debug.en.json index c406b9b..6b98910 100644 --- a/web/static/i18n/debug.en.json +++ b/web/static/i18n/debug.en.json @@ -1,10 +1,11 @@ { - "page.title": "OGScope Debug Console", + "page.title": "OGScope Camera Debug Console", "language.label": "Language", "language.zh": "Chinese", "language.en": "English", "language.bilingual": "Bilingual", - "header.title": "🔧 OGScope Debug Console", + "header.title": "🔧 OGScope Camera Debug Console", + "header.linkAnalysis": "Plate Solve Console", "header.backToHome": "← Back to Home", "common.status": "Status:", "common.close": "✕ Close", @@ -218,6 +219,7 @@ "notify.presetDeleted": "Preset '{name}' deleted", "notify.deletePresetFailed": "Failed to delete preset: {error}", "notify.downloadStarted": "Download started: {filename}", + "notify.downloadStartedWithSidecar": "Download started: {filename} and sidecar {sidecar}", "notify.fileDeleted": "File {filename} deleted", "notify.fileDeleteSuccess": "File deleted", "notify.deleteFileFailed": "Failed to delete file: {error}", diff --git a/web/static/i18n/debug.zh.json b/web/static/i18n/debug.zh.json index 
dd06012..49e154b 100644 --- a/web/static/i18n/debug.zh.json +++ b/web/static/i18n/debug.zh.json @@ -1,10 +1,11 @@ { - "page.title": "OGScope 调试控制台", + "page.title": "OGScope 相机调试控制台", "language.label": "语言", "language.zh": "中文", "language.en": "English", "language.bilingual": "中英双语", - "header.title": "🔧 OGScope 调试控制台", + "header.title": "🔧 OGScope 相机调试控制台", + "header.linkAnalysis": "星空解算控制台", "header.backToHome": "← 返回主界面", "common.status": "状态:", "common.close": "✕ 关闭", @@ -218,6 +219,7 @@ "notify.presetDeleted": "预设 '{name}' 已删除", "notify.deletePresetFailed": "删除预设失败: {error}", "notify.downloadStarted": "开始下载: {filename}", + "notify.downloadStartedWithSidecar": "开始下载: {filename} 与参数侧车 {sidecar}", "notify.fileDeleted": "文件 {filename} 删除成功", "notify.fileDeleteSuccess": "文件删除成功", "notify.deleteFileFailed": "删除文件失败: {error}", diff --git a/web/static/js/app.js b/web/static/js/app.js index fbb2ea6..a21fa43 100644 --- a/web/static/js/app.js +++ b/web/static/js/app.js @@ -237,7 +237,8 @@ class OGScopeApp { const lonMin = Math.floor((lon - lonDeg) * 60); const lonSec = Math.floor(((lon - lonDeg) * 60 - lonMin) * 60); - gpsElement.textContent = `${latDeg}°${latMin}'${latSec}"N    ${lonDeg}°${lonMin}'${lonSec}"E`; + const sep = "\u00A0\u00A0"; + gpsElement.textContent = `${latDeg}°${latMin}'${latSec}"N${sep}${lonDeg}°${lonMin}'${lonSec}"E`; } } diff --git a/web/static/js/debug-analysis.js b/web/static/js/debug-analysis.js index 375a053..6e704e7 100644 --- a/web/static/js/debug-analysis.js +++ b/web/static/js/debug-analysis.js @@ -1,8 +1,13 @@ (function () { const state = { uploadedFileName: null, + uploadedFileSignature: null, + uploadList: [], lastJobId: null, pollTimer: null, + previewObjectUrl: null, + lastSolveOverlay: null, + lastSolveResult: null, }; function $(id) { @@ -30,98 +35,332 @@ return data; } - async function onCatalogDownload() { - const source = $("catalog-source").value; - const url = $("catalog-url").value.trim() || null; - const magnitude_limit = Number($("magnitude-limit").value); - const data = await request("/api/catalog/download", { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ source, url, magnitude_limit }), - }); - setOutput("catalog-status", data); + function fileSignature(file) { + if (!file) return null; + return `${file.name}:${file.size}:${file.lastModified}`; } - async function onCatalogBuild() { - const magnitude_limit = Number($("magnitude-limit").value); - const ra_bin_size_deg = Number($("ra-bin-size").value); - const data = await request("/api/catalog/build-index", { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ magnitude_limit, ra_bin_size_deg }), + function formatBytes(n) { + if (n == null || Number.isNaN(n)) return "—"; + if (n < 1024) return `${n} B`; + if (n < 1024 * 1024) return `${(n / 1024).toFixed(1)} KB`; + return `${(n / (1024 * 1024)).toFixed(2)} MB`; + } + + function isImageFilename(name) { + return /\.(jpe?g|png|webp|bmp|gif|fits?)$/i.test(name); + } + + function readSolverQueryParams() { + const fov = parseFloat($("fov-estimate").value); + const fovMax = $("fov-max-error").value.trim(); + const timeout = $("solve-timeout-ms").value.trim(); + const hintRa = $("hint-ra").value.trim(); + const hintDec = $("hint-dec").value.trim(); + const params = new URLSearchParams(); + if (!Number.isNaN(fov)) params.set("fov_estimate", String(fov)); + if (fovMax !== "") params.set("fov_max_error", fovMax); + if (timeout !== "") params.set("solve_timeout_ms", 
timeout); + if (hintRa !== "") params.set("hint_ra_deg", hintRa); + if (hintDec !== "") params.set("hint_dec_deg", hintDec); + return params; + } + + function readSolverBody() { + const fov = parseFloat($("fov-estimate").value); + const fovMax = $("fov-max-error").value.trim(); + const timeout = $("solve-timeout-ms").value.trim(); + const hintRa = $("hint-ra").value.trim(); + const hintDec = $("hint-dec").value.trim(); + const body = {}; + if (!Number.isNaN(fov)) body.fov_estimate = fov; + if (fovMax !== "") body.fov_max_error = parseFloat(fovMax); + if (timeout !== "") body.solve_timeout_ms = parseInt(timeout, 10); + if (hintRa !== "") body.hint_ra_deg = parseFloat(hintRa); + if (hintDec !== "") body.hint_dec_deg = parseFloat(hintDec); + return body; + } + + function readCentroidParamsForBody() { + const o = {}; + const sigma = parseFloat($("centroid-sigma").value); + if (!Number.isNaN(sigma)) o.sigma = sigma; + const maxArea = parseInt($("centroid-max-area").value, 10); + if (!Number.isNaN(maxArea)) o.max_area = maxArea; + const minArea = parseInt($("centroid-min-area").value, 10); + if (!Number.isNaN(minArea)) o.min_area = minArea; + const filtsize = parseInt($("centroid-filtsize").value, 10); + if (!Number.isNaN(filtsize)) { + if (filtsize % 2 === 0) { + throw new Error("filtsize 须为奇数 / filtsize must be odd"); + } + o.filtsize = filtsize; + } + const binOpen = $("centroid-binary-open"); + if (binOpen) o.binary_open = binOpen.checked; + const mar = $("centroid-max-axis-ratio").value.trim(); + if (mar !== "") { + const v = parseFloat(mar); + if (Number.isNaN(v)) { + throw new Error("max_axis_ratio 无效 / invalid max_axis_ratio"); + } + o.max_axis_ratio = v; + } + return o; + } + + function readMaxImageSide() { + const v = parseInt($("centroid-max-image-side").value, 10); + if (Number.isNaN(v) || v < 256) return undefined; + return v; + } + + function buildSolveImageBody() { + if (!state.uploadedFileName) { + throw new Error( + "请先上传或选择服务器上的图片 / Upload or pick a server file first" + ); + } + const body = { + input_name: state.uploadedFileName, + ...readSolverBody(), + }; + const centroid = readCentroidParamsForBody(); + if (Object.keys(centroid).length > 0) body.centroid = centroid; + const mis = readMaxImageSide(); + if (mis !== undefined) body.max_image_side = mis; + return body; + } + + function resetCentroidDefaults() { + document.querySelectorAll(".card-centroid [data-default]").forEach((el) => { + if (el.type === "checkbox") { + el.checked = el.getAttribute("data-default-checked") === "true"; + } else { + el.value = el.getAttribute("data-default") || ""; + } }); - setOutput("catalog-status", data); } - async function onCatalogStatus() { - const data = await request("/api/catalog/status"); - setOutput("catalog-status", data); + function clearServerUploadSelect() { + const sel = $("server-upload-select"); + if (sel) sel.value = ""; + } + + function formatNum(v, digits) { + if (v === undefined || v === null || Number.isNaN(v)) return "—"; + return Number(v).toFixed(digits); } - function readStarPayload() { + function formatMsLine(ms) { + if (ms == null || Number.isNaN(ms)) return "—"; + const m = Number(ms); + const sec = (m / 1000).toFixed(1); + return `${m.toFixed(0)} ms(约 ${sec} s)`; + } + + function renderSolveOverlayPanel(result) { + const panel = $("solve-overlay-panel"); + if (!panel) return; + if (!result || typeof result !== "object") { + panel.innerHTML = ""; + return; + } + const rows = [ + ["RA°", formatNum(result.ra_deg, 4)], + ["Dec°", formatNum(result.dec_deg, 4)], + 
["FOV°", formatNum(result.fov_deg, 3)], + ["Roll°", formatNum(result.roll_deg, 2)], + ["T_extract", formatMsLine(result.t_extract_ms)], + ["T_solve", formatMsLine(result.t_solve_ms)], + ["Matches", result.matches != null ? String(result.matches) : "—"], + ["RMSE″", formatNum(result.rmse_arcsec, 2)], + ["Prob", formatNum(result.prob, 4)], + ["Status", result.status != null ? String(result.status) : "—"], + ]; + const parts = ["
"]; + for (const [k, v] of rows) { + parts.push(`
${k}
${v}
`); + } + parts.push("
"); + panel.innerHTML = parts.join(""); + } + + function drawSolveOverlay(canvas, img, overlay, layers) { + if (!canvas || !img || !overlay) return; + const w = img.naturalWidth || 1; + const h = img.naturalHeight || 1; + canvas.width = w; + canvas.height = h; + canvas.style.width = `${img.clientWidth}px`; + canvas.style.height = `${img.clientHeight}px`; + + const ctx = canvas.getContext("2d"); + if (!ctx) return; + ctx.clearRect(0, 0, w, h); + + const drawAll = layers && layers.all; + const drawPat = layers && layers.pattern; + const drawMat = layers && layers.matched; + + if (drawAll && Array.isArray(overlay.stars_all_centroids)) { + ctx.fillStyle = "rgba(156, 163, 175, 0.85)"; + for (const s of overlay.stars_all_centroids) { + const x = s.x; + const y = s.y; + ctx.beginPath(); + ctx.arc(x, y, 2.4, 0, Math.PI * 2); + ctx.fill(); + } + } + + if (drawPat && Array.isArray(overlay.stars_pattern)) { + ctx.strokeStyle = "rgba(251, 146, 60, 0.95)"; + ctx.lineWidth = 2; + for (const s of overlay.stars_pattern) { + ctx.beginPath(); + ctx.arc(s.x, s.y, 6, 0, Math.PI * 2); + ctx.stroke(); + } + } + + if (drawMat && Array.isArray(overlay.stars_matched)) { + ctx.strokeStyle = "rgba(34, 197, 94, 0.95)"; + ctx.fillStyle = "rgba(34, 197, 94, 0.95)"; + ctx.lineWidth = 2; + ctx.font = "11px system-ui, sans-serif"; + for (const s of overlay.stars_matched) { + ctx.beginPath(); + ctx.arc(s.x, s.y, 7, 0, Math.PI * 2); + ctx.stroke(); + if (s.mag != null) { + const label = `m${formatNum(s.mag, 1)}`; + ctx.fillText(label, s.x + 4, s.y - 4); + } + } + } + } + + function readLayerToggles() { return { - source_id: $("star-source-id").value.trim(), - ra: Number($("star-ra").value), - dec: Number($("star-dec").value), - pmra: Number($("star-pmra").value), - pmdec: Number($("star-pmdec").value), - phot_g_mean_mag: Number($("star-mag").value), - name_en: $("star-name-en").value.trim(), - name_zh: $("star-name-zh").value.trim(), - description_en: $("star-desc-en").value.trim(), - description_zh: $("star-desc-zh").value.trim(), + matched: $("layer-matched") && $("layer-matched").checked, + pattern: $("layer-pattern") && $("layer-pattern").checked, + all: $("layer-all") && $("layer-all").checked, }; } - async function onStarCreate() { - const payload = readStarPayload(); - const data = await request("/api/catalog/stars", { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify(payload), - }); - setOutput("star-output", data); + function refreshOverlayDraw() { + const img = $("solve-preview-img"); + const canvas = $("solve-preview-canvas"); + if (!img || !canvas || !state.lastSolveOverlay) return; + drawSolveOverlay(canvas, img, state.lastSolveOverlay, readLayerToggles()); } - async function onStarUpdate() { - const payload = readStarPayload(); - const sid = payload.source_id; - const data = await request(`/api/catalog/stars/${encodeURIComponent(sid)}`, { - method: "PUT", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify(payload), + function setupResizeSync() { + const img = $("solve-preview-img"); + if (!img || !window.ResizeObserver) return; + const ro = new ResizeObserver(() => { + refreshOverlayDraw(); }); - setOutput("star-output", data); + ro.observe(img); } - async function onStarDelete() { - const sid = $("star-source-id").value.trim(); - const data = await request(`/api/catalog/stars/${encodeURIComponent(sid)}`, { - method: "DELETE", - }); - setOutput("star-output", data); + function showPreviewFromFile(file) { + clearServerUploadSelect(); + const wrap 
= $("solve-preview-wrap"); + const img = $("solve-preview-img"); + if (!wrap || !img) return; + if (state.previewObjectUrl) { + URL.revokeObjectURL(state.previewObjectUrl); + state.previewObjectUrl = null; + } + state.previewObjectUrl = URL.createObjectURL(file); + img.onload = () => { + wrap.hidden = false; + refreshOverlayDraw(); + }; + img.src = state.previewObjectUrl; } - async function onStarGet() { - const sid = $("star-source-id").value.trim(); - const data = await request(`/api/catalog/stars/${encodeURIComponent(sid)}`); - setOutput("star-output", data); + function showPreviewFromServer(filename) { + const wrap = $("solve-preview-wrap"); + const imgEl = $("solve-preview-img"); + if (!wrap || !imgEl) return; + if (state.previewObjectUrl) { + URL.revokeObjectURL(state.previewObjectUrl); + state.previewObjectUrl = null; + } + if (!isImageFilename(filename)) { + wrap.hidden = true; + return; + } + const url = `/api/analysis/uploads/file?filename=${encodeURIComponent(filename)}`; + imgEl.onload = () => { + wrap.hidden = false; + refreshOverlayDraw(); + }; + imgEl.src = url; } - async function onStarList() { - const q = $("star-query").value.trim(); - const params = new URLSearchParams({ limit: "50", offset: "0" }); - if (q) params.set("source_query", q); - const data = await request(`/api/catalog/stars?${params.toString()}`); - setOutput("star-output", data); + async function refreshUploadList(selectFilename) { + const data = await request("/api/analysis/uploads"); + state.uploadList = data.files || []; + const sel = $("server-upload-select"); + if (!sel) return; + const keep = selectFilename || sel.value || ""; + sel.innerHTML = + ''; + for (const f of state.uploadList) { + const opt = document.createElement("option"); + opt.value = f.filename; + opt.textContent = `${f.filename} (${formatBytes(f.size)})`; + sel.appendChild(opt); + } + if (keep && [...sel.options].some((o) => o.value === keep)) { + sel.value = keep; + } } - async function onUpload() { + function onServerUploadSelect() { + const sel = $("server-upload-select"); + if (!sel) return; + const name = sel.value; + if (!name) return; + state.uploadedFileName = name; + state.uploadedFileSignature = `__server__:${name}`; const fileInput = $("analysis-file"); - if (!fileInput.files || fileInput.files.length === 0) { - throw new Error("请先选择文件 / Please choose a file"); + if (fileInput) fileInput.value = ""; + showPreviewFromServer(name); + } + + function applySolveResultToPreview(result) { + state.lastSolveResult = result; + state.lastSolveOverlay = result && result.solve_overlay ? result.solve_overlay : null; + renderSolveOverlayPanel(result); + const img = $("solve-preview-img"); + if (!img) return; + if (img.complete && img.naturalWidth) { + refreshOverlayDraw(); + } else { + img.addEventListener("load", () => refreshOverlayDraw(), { once: true }); } - const file = fileInput.files[0]; + } + + function setUploadProgressVisible(visible) { + const el = $("upload-progress-wrap"); + if (el) el.hidden = !visible; + const btn = $("upload-btn"); + if (btn) btn.setAttribute("aria-busy", visible ? "true" : "false"); + } + + function setSolveProgressVisible(visible) { + const el = $("solve-progress-wrap"); + if (el) el.hidden = !visible; + const btn = $("solve-image-btn"); + if (btn) btn.setAttribute("aria-busy", visible ? 
"true" : "false"); + } + + async function uploadFileInternal(file) { const fd = new FormData(); fd.append("file", file); const data = await request("/api/analysis/upload", { @@ -129,45 +368,122 @@ body: fd, }); state.uploadedFileName = data.filename; + state.uploadedFileSignature = fileSignature(file); setOutput("analysis-output", data); + if (file.type.startsWith("image/")) { + showPreviewFromFile(file); + } + refreshUploadList(data.filename).catch(() => null); + return data; + } + + async function onUpload() { + const fileInput = $("analysis-file"); + if (!fileInput.files || fileInput.files.length === 0) { + throw new Error("请先选择文件 / Please choose a file"); + } + const file = fileInput.files[0]; + setUploadProgressVisible(true); + try { + await uploadFileInternal(file); + } finally { + setUploadProgressVisible(false); + } } async function onSolveImage() { + const fileInput = $("analysis-file"); + const file = + fileInput && fileInput.files && fileInput.files.length > 0 + ? fileInput.files[0] + : null; + + if (file) { + const sig = fileSignature(file); + const needUpload = + !state.uploadedFileName || + sig !== state.uploadedFileSignature || + sig === null; + if (needUpload) { + setUploadProgressVisible(true); + try { + await uploadFileInternal(file); + } finally { + setUploadProgressVisible(false); + } + } + } else if (!state.uploadedFileName) { + throw new Error( + "请先选择图片并上传,或使用「直接解算」前在本地选好文件。" + ); + } + + setSolveProgressVisible(true); + try { + const data = await request("/api/analysis/solve/image", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(buildSolveImageBody()), + }); + setOutput("analysis-output", data); + if (data && data.result) { + applySolveResultToPreview(data.result); + } + } finally { + setSolveProgressVisible(false); + } + } + + async function onCentroidPreview() { if (!state.uploadedFileName) { - throw new Error("请先上传图片 / Please upload an image first"); + throw new Error( + "请先上传或选择服务器上的图片 / Upload or pick a server file first" + ); } - const hint_ra_deg = Number($("hint-ra").value); - const hint_dec_deg = Number($("hint-dec").value); - const params = new URLSearchParams({ + const body = { input_name: state.uploadedFileName, - hint_ra_deg: String(hint_ra_deg), - hint_dec_deg: String(hint_dec_deg), - }); - const data = await request(`/api/analysis/solve/image?${params.toString()}`, { - method: "POST", - }); - setOutput("analysis-output", data); + centroid: readCentroidParamsForBody(), + max_image_side: readMaxImageSide(), + }; + if (Object.keys(body.centroid).length === 0) delete body.centroid; + setSolveProgressVisible(true); + try { + const data = await request("/api/analysis/extract/preview", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(body), + }); + setOutput("analysis-output", data); + const wrap = $("centroid-preview-wrap"); + const img = $("centroid-mask-img"); + if (data && data.binary_mask_png_base64 && img) { + img.src = "data:image/png;base64," + data.binary_mask_png_base64; + if (wrap) wrap.hidden = false; + } else if (wrap) { + wrap.hidden = true; + } + } finally { + setSolveProgressVisible(false); + } } async function onCreateVideoJob() { if (!state.uploadedFileName) { throw new Error("请先上传视频 / Please upload a video first"); } - const hint_ra_deg = Number($("hint-ra").value); - const hint_dec_deg = Number($("hint-dec").value); const frame_step = Number($("frame-step").value); const max_frames = Number($("max-frames").value); + const body = { + input_name: 
state.uploadedFileName, + input_type: "video", + frame_step, + max_frames, + ...readSolverBody(), + }; const data = await request("/api/analysis/jobs", { method: "POST", headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - input_name: state.uploadedFileName, - input_type: "video", - hint_ra_deg, - hint_dec_deg, - frame_step, - max_frames, - }), + body: JSON.stringify(body), }); state.lastJobId = data.job_id; setOutput("analysis-output", data); @@ -186,12 +502,7 @@ } async function onRealtimeStart() { - const hint_ra_deg = Number($("hint-ra").value); - const hint_dec_deg = Number($("hint-dec").value); - const params = new URLSearchParams({ - hint_ra_deg: String(hint_ra_deg), - hint_dec_deg: String(hint_dec_deg), - }); + const params = readSolverQueryParams(); const data = await request( `/api/debug/analysis/realtime/start?${params.toString()}`, { method: "POST" } @@ -213,7 +524,12 @@ function bindClick(id, handler) { const node = $(id); - if (!node) return; + if (!node) { + console.warn( + `[debug-analysis] 缺少 DOM #${id},按钮未绑定 / Missing #${id}, click not bound` + ); + return; + } node.addEventListener("click", async () => { try { await handler(); @@ -241,21 +557,72 @@ checkbox.dispatchEvent(new Event("change")); } - bindClick("catalog-download-btn", onCatalogDownload); - bindClick("catalog-build-btn", onCatalogBuild); - bindClick("catalog-status-btn", onCatalogStatus); - bindClick("star-create-btn", onStarCreate); - bindClick("star-update-btn", onStarUpdate); - bindClick("star-delete-btn", onStarDelete); - bindClick("star-get-btn", onStarGet); - bindClick("star-list-btn", onStarList); + function setupStreamEmbed() { + const cb = $("embed-stream"); + const container = $("stream-container"); + const img = $("stream-img"); + if (!cb || !container || !img) return; + cb.addEventListener("change", () => { + if (cb.checked) { + container.hidden = false; + img.src = "/api/debug/camera/stream?quality=60"; + } else { + container.hidden = true; + img.removeAttribute("src"); + } + }); + } + + function setupLayerToggles() { + ["layer-matched", "layer-pattern", "layer-all"].forEach((id) => { + const el = $(id); + if (el) el.addEventListener("change", () => refreshOverlayDraw()); + }); + } + + function setupFileInputPreview() { + const fileInput = $("analysis-file"); + if (!fileInput) return; + fileInput.addEventListener("change", () => { + clearServerUploadSelect(); + if (fileInput.files && fileInput.files.length > 0) { + const f = fileInput.files[0]; + if (f.type.startsWith("image/")) { + showPreviewFromFile(f); + } + } + }); + } + + function setupServerUploadControls() { + const sel = $("server-upload-select"); + const btn = $("refresh-uploads-btn"); + if (sel) sel.addEventListener("change", onServerUploadSelect); + if (btn) { + btn.addEventListener("click", async () => { + try { + await refreshUploadList(); + } catch (error) { + setOutput("analysis-output", String(error)); + } + }); + } + } + bindClick("upload-btn", onUpload); bindClick("solve-image-btn", onSolveImage); + bindClick("centroid-reset-btn", resetCentroidDefaults); + bindClick("centroid-preview-btn", onCentroidPreview); bindClick("create-video-job-btn", onCreateVideoJob); bindClick("query-job-btn", onQueryJob); bindClick("realtime-start-btn", onRealtimeStart); bindClick("realtime-stop-btn", onRealtimeStop); bindClick("realtime-status-btn", onRealtimeStatus); setupAutoPoll(); - onCatalogStatus().catch(() => null); + setupStreamEmbed(); + setupLayerToggles(); + setupFileInputPreview(); + setupServerUploadControls(); + 
setupResizeSync(); + refreshUploadList().catch(() => null); })(); diff --git a/web/static/js/debug.js b/web/static/js/debug.js index 41032c9..ff55607 100644 --- a/web/static/js/debug.js +++ b/web/static/js/debug.js @@ -39,6 +39,8 @@ class DebugConsole { this.systemInfoInterval = null; this.previewObjectUrl = null; this.systemInfo = null; + this.statusErrorCount = 0; + this.statusLastNotifyTs = 0; // 智能头部隐藏 / Intelligent head hiding this.lastScrollY = 0; @@ -966,6 +968,19 @@ class DebugConsole { document.getElementById('stop-preview')?.addEventListener('click', () => { this.stopPreview(); }); + + // 预览遮罩文案提示“点击启动”,这里补上实际点击行为 + // Overlay says "click to start", so bind the click action. + document.getElementById('preview-overlay')?.addEventListener('click', () => { + if (this.cameraStatus.streaming && !this.previewActive) { + this.startPreviewUpdate(); + this.updateButtonStates(); + return; + } + if (!this.cameraStatus.streaming) { + this.startPreview(); + } + }); // 拍摄控制 / Shooting control document.getElementById('capture-image')?.addEventListener('click', () => { @@ -1335,6 +1350,7 @@ class DebugConsole { // 同步关键参数状态,避免UI与相机真实状态脱节 / Synchronize the status of key parameters to avoid the disconnection between the UI and the real status of the camera const info = status.info || {}; + this.applyControlRanges(info.control_ranges || {}); if (typeof info.auto_exposure === 'boolean') { this.currentSettings.autoExposure = info.auto_exposure; this.updateAutoExposureMode(info.auto_exposure, false); @@ -1376,10 +1392,16 @@ class DebugConsole { this.updateRecordingButtons(false); this.setRecOverlay(false); } + this.statusErrorCount = 0; } catch (error) { console.error('[DebugConsole] 获取相机状态失败:', error); - this.showNotification('获取相机状态失败', 'error'); + this.statusErrorCount += 1; + const now = Date.now(); + if (this.statusErrorCount >= 3 && now - this.statusLastNotifyTs > 10000) { + this.showNotification('获取相机状态失败', 'error'); + this.statusLastNotifyTs = now; + } } } @@ -1452,6 +1474,13 @@ class DebugConsole { */ async startPreview() { try { + if (this.cameraStatus.streaming) { + this.startPreviewUpdate(); + this.updateButtonStates(); + this.showNotification('相机预览已启动', 'success'); + return; + } + // 显示启动状态 / Show startup status this.showNotification('正在启动相机预览...', 'info'); @@ -1480,13 +1509,10 @@ class DebugConsole { */ async stopPreview() { try { - await fetch('/api/debug/camera/stop', { - method: 'POST' - }); - + // 仅停止前端预览,不影响全局共享相机状态 + // Stop local UI preview only; do not stop shared camera globally. this.stopPreviewUpdate(); this.updateButtonStates(); - await this.updateCameraStatus(); this.showNotification('相机预览已停止', 'info'); this.endStatusPolling(); @@ -1807,7 +1833,9 @@ class DebugConsole { const stopBtn = document.getElementById('stop-preview'); if (startBtn) { - startBtn.disabled = this.cameraStatus.streaming; + // 后端已 streaming 但前端预览循环没激活时,允许重连预览 + // Allow reconnecting UI preview loop when backend is already streaming. 
+ startBtn.disabled = this.cameraStatus.streaming && this.previewActive; } if (stopBtn) { @@ -1941,6 +1969,78 @@ class DebugConsole { return value.toFixed(2); } + /** + * 应用相机控制范围 / Apply camera control ranges + */ + applyControlRanges(controlRanges) { + if (!controlRanges || typeof controlRanges !== 'object') { + return; + } + + this.applySingleControlRange( + 'exposure-setting', + controlRanges.exposure_us, + { + decimals: 0, + onUpdate: (value) => this.updateExposureDisplay(parseInt(value, 10)), + } + ); + this.applySingleControlRange( + 'gain-setting', + controlRanges.analogue_gain, + { + decimals: 1, + onUpdate: (value) => this.updateGainDisplay(parseFloat(value)), + } + ); + this.applySingleControlRange( + 'digital-gain-setting', + controlRanges.digital_gain, + { + decimals: 1, + onUpdate: (value) => this.updateDigitalGainDisplay(parseFloat(value)), + } + ); + } + + applySingleControlRange(elementId, rangeInfo, options = {}) { + if (!rangeInfo || typeof rangeInfo !== 'object') { + return; + } + const slider = document.getElementById(elementId); + if (!slider) { + return; + } + + const min = Number(rangeInfo.min); + const max = Number(rangeInfo.max); + const defaultValue = Number(rangeInfo.default); + let step = Number(rangeInfo.step); + if (!Number.isFinite(min) || !Number.isFinite(max) || min >= max) { + return; + } + if (!Number.isFinite(step) || step <= 0) { + step = slider.step && Number(slider.step) > 0 ? Number(slider.step) : 1; + } + + slider.min = String(min); + slider.max = String(max); + slider.step = String(step); + + const current = Number(slider.value); + const fallback = Number.isFinite(defaultValue) ? defaultValue : min; + const baseValue = Number.isFinite(current) ? current : fallback; + const clamped = Math.max(min, Math.min(max, baseValue)); + const decimals = Number.isInteger(options.decimals) ? options.decimals : 0; + const normalized = + decimals > 0 ? Number(clamped.toFixed(decimals)) : Math.round(clamped); + slider.value = String(normalized); + + if (typeof options.onUpdate === 'function') { + options.onUpdate(slider.value); + } + } + /** * 更新曝光显示 / Update exposure display */ @@ -2265,7 +2365,12 @@ class DebugConsole { * 应用设置 / Apply settings */ async applySettings() { + const modeSelect = document.getElementById('auto-exposure-mode'); + const requestedAutoExposure = modeSelect + ? modeSelect.value === 'auto' + : !!this.currentSettings.autoExposure; const settings = { + autoExposure: requestedAutoExposure, exposure: parseInt(document.getElementById('exposure-setting').value), gain: parseFloat(document.getElementById('gain-setting').value), digitalGain: parseFloat(document.getElementById('digital-gain-setting').value), @@ -2288,6 +2393,7 @@ class DebugConsole { if (response.ok) { this.showNotification('设置应用成功', 'success'); this.currentSettings = {...this.currentSettings, ...settings}; + this.updateAutoExposureMode(!!settings.autoExposure, false); await this.updateCameraStatus(); } else { const error = await response.json(); @@ -2502,6 +2608,11 @@ class DebugConsole { this.qualityMonitoringInterval = setInterval(async () => { try { + // 仅在相机采集中请求质量指标,避免空闲时触发无意义请求 + // Query quality metrics only while camera is streaming. 
+ if (!this.cameraStatus?.streaming) { + return; + } const response = await fetch('/api/debug/camera/image-quality'); if (response.ok) { const data = await response.json(); @@ -2708,6 +2819,10 @@ class DebugConsole { } try { + // 优先保存“当前已生效状态”,避免 UI 暂存值与真实设备状态冲突 / Prefer saving the currently applied state to avoid conflicts between pending UI values and actual device state + const effectiveAutoExposure = typeof this.currentSettings.autoExposure === 'boolean' + ? this.currentSettings.autoExposure + : (document.getElementById('auto-exposure-mode')?.value === 'auto'); const response = await fetch('/api/debug/camera/presets', { method: 'POST', headers: { @@ -2719,7 +2834,7 @@ class DebugConsole { exposure_us: parseInt(document.getElementById('exposure-setting').value), analogue_gain: parseFloat(document.getElementById('gain-setting').value), digital_gain: parseFloat(document.getElementById('digital-gain-setting').value), - auto_exposure: document.getElementById('auto-exposure-mode').value === 'auto', + auto_exposure: effectiveAutoExposure, // 图像增强参数 / Image enhancement parameters contrast: parseFloat(document.getElementById('contrast-setting').value), brightness: parseFloat(document.getElementById('brightness-setting').value), @@ -2952,17 +3067,49 @@ class DebugConsole { } /** - * 下载文件 / Download file + * 下载文件;若为图片/视频则尝试同时下载同名 .txt 参数侧车 / Download file; fetch sidecar .txt for media */ - downloadFile(filename) { + async downloadFile(filename) { const link = document.createElement('a'); link.href = `/api/debug/files/${encodeURIComponent(filename)}`; link.download = filename; document.body.appendChild(link); link.click(); document.body.removeChild(link); - - this.showNotification(`开始下载: ${filename}`, 'info'); + + const mediaMatch = filename.match(/\.(jpe?g|png|bmp|tiff?|webp|mp4|avi|mov|mkv|wmv|flv|webm|m4v)$/i); + let sidecarName = ''; + let sidecarOk = false; + if (mediaMatch) { + const stem = filename.slice(0, -mediaMatch[0].length); + sidecarName = `${stem}.txt`; + try { + const res = await fetch(`/api/debug/files/${encodeURIComponent(sidecarName)}`); + if (res.ok) { + const blob = await res.blob(); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = sidecarName; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); + sidecarOk = true; + } + } catch (e) { + console.warn('[DebugConsole] sidecar download skipped:', e); + } + } + + if (sidecarOk) { + this.showNotification( + this.t('notify.downloadStartedWithSidecar', { filename, sidecar: sidecarName }), + 'info' + ); + } else { + this.showNotification(this.t('notify.downloadStarted', { filename }), 'info'); + } } /** @@ -3007,6 +3154,12 @@ class DebugConsole { ${info.resolution} ` : ''} + ${info.duration_s != null && info.duration_s !== undefined ? ` +
+ 时长 / Duration + ${info.duration_s}s +
+ ` : ''} `; diff --git a/web/templates/debug.html b/web/templates/debug.html index 3efea8f..6a2decc 100644 --- a/web/templates/debug.html +++ b/web/templates/debug.html @@ -3,7 +3,7 @@ - OGScope 调试控制台 + OGScope 相机调试控制台 @@ -29,7 +29,7 @@
（该文件其余两处改动的 HTML 标记在提取中丢失,仅保留可恢复文本 / Markup lost in extraction; recoverable text only:其一将页面头部标题「🔧 OGScope 调试控制台」改为「🔧 OGScope 相机调试控制台」;其二为头部链接区域的单行改动,应为新增「星空解算控制台」入口链接,对应新增的 i18n 键 header.linkAnalysis。）
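上文 debug.js 以 this.t('notify.downloadStartedWithSidecar', { filename, sidecar: sidecarName }) 取文案,i18n JSON 中对应值使用 {filename}、{sidecar} 形式的占位符。翻译函数本身不在本 diff 中,下面仅给出占位符插值的最小示意(messages、t 均为示意名称,并非项目实际实现):

```js
// 占位符插值示意 / Illustrative placeholder interpolation (not the project's real t())
const messages = {
  "notify.downloadStarted": "开始下载: {filename}",
  "notify.downloadStartedWithSidecar": "开始下载: {filename} 与参数侧车 {sidecar}",
};

function t(key, params = {}) {
  const template = messages[key] || key;
  // 将 {name} 占位符替换为同名参数,缺省时原样保留 / Replace {name} with params[name]; keep as-is if missing
  return template.replace(/\{(\w+)\}/g, (match, name) =>
    Object.prototype.hasOwnProperty.call(params, name) ? String(params[name]) : match
  );
}

console.log(t("notify.downloadStartedWithSidecar", {
  filename: "capture_0001.jpg",
  sidecar: "capture_0001.txt",
}));
// => 开始下载: capture_0001.jpg 与参数侧车 capture_0001.txt
```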
diff --git a/web/templates/debug_analysis.html b/web/templates/debug_analysis.html index b6583a8..5962ca5 100644 --- a/web/templates/debug_analysis.html +++ b/web/templates/debug_analysis.html @@ -3,120 +3,131 @@ - OGScope 星图解算工作台 - + OGScope 星空解算控制台 +
（以下模板主体改动的 HTML 标记在提取中丢失,仅保留可恢复的区块结构与文案 / Markup lost in extraction; recoverable sections and copy only:）

移除的区块 / Removed sections:
* 「1. 星表管理」:星表来源、下载 URL、星等上限、RA 分桶等输入与下载/建索引/查询状态操作,及输出区「等待操作...」。
* 「1.1 星点 CRUD」:source_id、RA/Dec、自行、星等、中英文名称与描述等字段及增删改查操作。
* 旧「2. 素材上传与分析」「3. 实时解算」标题及其 RA/Dec 提示输入。

新增的区块 / Added sections:
* 「1. Tetra3 解算说明」:使用 vendored Tetra3(Cedar-Solve)图案库 default_database.npz 做 lost-in-space 解算,需提供大致水平视场角 FOV(度);请将 default_database.npz 放到 data/plate_solve/ 或配置环境变量 OGSCOPE_SOLVER_TETRA_DATABASE_PATH。摄像头实时预览(MJPEG):在调试控制台启动采集后,可打开 /api/debug/camera/stream 查看画面;本页「实时解算」使用同一路相机帧。
* 「2. 解算参数(Tetra3)」:FOV 估计、FOV 允许误差、超时(ms)、RA/Dec 提示等输入(对应 fov-estimate、fov-max-error、solve-timeout-ms、hint-ra、hint-dec)。
* 「2b. 提星参数(Tetra3)」:σ 阈值、面积与滤波(centroid-sigma、centroid-min-area、centroid-max-area、centroid-filtsize、centroid-binary-open、centroid-max-axis-ratio、centroid-max-image-side)。说明:仅用于静态图「直接解算」与「预览掩膜」;与 get_centroids_from_image 一致,视频任务仍用自研 OTSU 提星。
* 「3. 素材上传与分析」:本地文件上传与服务器素材选择(analysis-file、upload-btn、server-upload-select、refresh-uploads-btn),以及视频任务的 frame-step、max-frames 与创建/查询操作;说明:文件保存在 uploads/analysis/(相对 OGScope 工作目录,开发板重启后仍在,除非手动清理);同一张图片可直接点「直接解算」重复解析,无需再次上传。另含叠加层开关(layer-matched / layer-pattern / layer-all)、解算原图叠加面板(solve-preview-wrap / solve-overlay-panel)、提星掩膜预览与可折叠的「API 原始响应 / Raw JSON」输出区「等待操作...」。
* 「4. 实时解算(调试相机)」:需先在调试控制台启动相机预览/采集;含开始/停止/查询状态操作(realtime-start-btn / realtime-stop-btn / realtime-status-btn)。
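上面的解算台模板与 debug-analysis.js 搭配使用:buildSolveImageBody() 把表单值组装成 JSON 后 POST 到 /api/analysis/solve/image。下面是一个按该请求体字段手动调用的最小示意(字段名取自上文 JS;示例取值、服务端默认值与完整响应结构均为假设,仅 data.result / solve_overlay 的读取方式与页面脚本一致):

```js
// 最小示意:手动发起一次静态图解算 / Minimal sketch of a one-off image solve request
async function solveUploadedImage(inputName) {
  const body = {
    input_name: inputName,        // 素材池中已存在的文件名 / filename already in uploads/analysis/
    fov_estimate: 16.0,           // 水平视场角估计(度),示例值 / example horizontal FOV in degrees
    fov_max_error: 5.0,
    solve_timeout_ms: 8000,
    hint_ra_deg: 45.0,            // 粗略指向提示,可省略 / coarse pointing hints, optional
    hint_dec_deg: 80.0,
    centroid: { sigma: 3.0, min_area: 3, max_area: 100, filtsize: 25 }, // filtsize 须为奇数 / must be odd
    max_image_side: 1600,         // 提星前长边上界 / downscale cap before extraction
  };
  const res = await fetch("/api/analysis/solve/image", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  const data = await res.json();
  // 页面用 data.result 刷新指标,并用 data.result.solve_overlay 绘制匹配星/质心叠加层
  return data.result;
}
```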
- + diff --git a/web/templates/index.html b/web/templates/index.html index d44232a..274a7fd 100644 --- a/web/templates/index.html +++ b/web/templates/index.html @@ -1,220 +1,301 @@ - + - OGScope - 电子极轴镜 - - - + OGScope - 电子极轴镜 / Electronic polar scope + + - + - - + - - - - - - + - + + - - - - - - + + + - - + +
（HUD 主页模板其余改动的 HTML 标记在提取中丢失,仅保留可恢复文案 / Markup lost in extraction; recoverable copy only:）
* 加载屏:品牌字样「OGSCOPE」改为「OGScope」,提示「正在初始化...」改为「正在初始化… / Initializing…」。
* 顶栏:品牌「OGScope {{ version }}」同行显示;遥测条含「定位」「海拔」「WiFi」「GPS」。
* 左侧栏:「缩放 / ZOOM」控制;右侧为工具与模式栏(配合 hud-home.css 的 --hud-left-rail / --hud-right-rail 布局标尺)。
* 中央视口:「北极星参考 / POLARIS」标注与示例读数「FOV 6.5°」「MAG 14.2×」;状态卡「星点质量 / Stars:良好」与「对准误差 / Error:04.22″(角秒 RMS / arcsec),+0.0° | +0.0°」。
* 底栏:「极轴锁定 / LOCK」「赤纬 / DEC --」「ISO --」「曝光 / EXP --」。
* 移除旧版固定示例:GPS「39°54'26"N 116°23'29"E」、海拔「43.8 m」、「极轴偏差:方位 +2.3° / 高度 -1.7°」、「星点清晰度:良好」。
* 末尾 hunk(@@ -222,5 +303,41 @@)的标记内容不可恢复;原文件无结尾换行(\ No newline at end of file),本次补齐。
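hud-home.css 中 .guide-line 的注释提到与 app.js 的 updateGuideLine 配合旋转引导线;该函数体不在本 diff 中。下面是按该 CSS 约定(基准 transform 为 translate(-50%, 0) rotate(…))驱动旋转角的最小示意,函数名与角度含义均为假设:

```js
// 最小示意:仅更新引导线的旋转角,保留 CSS 中的位移基准(假设实现)
// Minimal sketch; the real updateGuideLine is not part of this diff.
function updateGuideLine(angleDeg) {
  const line = document.querySelector(".guide-line");
  if (!line) return;
  line.style.transform = `translate(-50%, 0) rotate(${angleDeg}deg)`;
}

updateGuideLine(45); // 与 CSS 默认的 rotate(45deg) 一致的示例角度 / example angle matching the CSS default
```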